vaibhaviiii28 committed
Commit a12bf3c · verified · 1 Parent(s): 9b4ae8f

Update app.py

Files changed (1)
  1. app.py +27 -34
app.py CHANGED
@@ -1,5 +1,4 @@
-from fastapi import FastAPI
-from flask import Flask, request, jsonify
+from fastapi import FastAPI, File, UploadFile
 from pydantic import BaseModel
 from transformers import pipeline
 from PIL import Image
@@ -7,15 +6,18 @@ import joblib
 import re
 import string
 import io
+import os
 import uvicorn
-from threading import Thread
 
-# Initialize Flask & FastAPI
-app = Flask(__name__)
-api = FastAPI()
+# ✅ Set Hugging Face Cache Directory (Fixes Permission Error)
+CACHE_DIR = "/tmp/hf_cache"
+os.makedirs(CACHE_DIR, exist_ok=True)
 
-# ✅ Load NSFW Image Classification Model
-pipe = pipeline("image-classification", model="LukeJacob2023/nsfw-image-detector")
+# ✅ Initialize FastAPI
+app = FastAPI()
+
+# ✅ Load NSFW Image Classification Model (with custom cache directory)
+pipe = pipeline("image-classification", model="LukeJacob2023/nsfw-image-detector", cache_dir=CACHE_DIR)
 
 # ✅ Load Toxic Text Classification Model
 try:
@@ -37,24 +39,23 @@ def preprocess_text(text):
     text = text.translate(str.maketrans('', '', string.punctuation))  # Remove punctuation
     return text.strip()
 
-# 📌 NSFW Image Classification API (Flask)
-@app.route('/classify_image', methods=['POST'])
-def classify_image():
-    if 'file' not in request.files:
-        return jsonify({"error": "No file uploaded"}), 400
-
-    file = request.files['file']
-    image = Image.open(io.BytesIO(file.read()))
-    results = pipe(image)
-
-    classification_label = max(results, key=lambda x: x['score'])['label']
-    nsfw_labels = {"sexy", "porn", "hentai"}
-    nsfw_status = "NSFW" if classification_label in nsfw_labels else "SFW"
-
-    return jsonify({"status": nsfw_status, "results": results})
+# 📌 NSFW Image Classification API
+@app.post("/classify_image/")
+async def classify_image(file: UploadFile = File(...)):
+    try:
+        image = Image.open(io.BytesIO(await file.read()))
+        results = pipe(image)
+
+        classification_label = max(results, key=lambda x: x['score'])['label']
+        nsfw_labels = {"sexy", "porn", "hentai"}
+        nsfw_status = "NSFW" if classification_label in nsfw_labels else "SFW"
+
+        return {"status": nsfw_status, "results": results}
+    except Exception as e:
+        return {"error": str(e)}
 
-# 📌 Toxic Text Classification API (FastAPI)
-@api.post("/classify_text/")
+# 📌 Toxic Text Classification API
+@app.post("/classify_text/")
 async def classify_text(data: TextInput):
     try:
         processed_text = preprocess_text(data.text)
@@ -65,14 +66,6 @@ async def classify_text(data: TextInput):
     except Exception as e:
         return {"error": str(e)}
 
-# 🔥 Run both servers using Gunicorn
-def run_flask():
-    app.run(host="0.0.0.0", port=5000)
-
-def run_fastapi():
-    uvicorn.run(api, host="0.0.0.0", port=8000)
-
+# ✅ Run FastAPI using Uvicorn (Hugging Face requires port 7860)
 if __name__ == "__main__":
-    Thread(target=run_flask).start()
-    Thread(target=run_fastapi).start()
-
+    uvicorn.run(app, host="0.0.0.0", port=7860)
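
For reference, a minimal client-side sketch for exercising the new /classify_image/ endpoint, assuming the app is running locally on port 7860 as in the __main__ block, that the requests library is installed, and that sample.jpg is a placeholder path:

import requests

# Hypothetical smoke test for the image endpoint; the multipart field
# name "file" matches the UploadFile parameter in classify_image.
with open("sample.jpg", "rb") as f:  # placeholder image path
    resp = requests.post(
        "http://localhost:7860/classify_image/",
        files={"file": ("sample.jpg", f, "image/jpeg")},
    )
print(resp.json())  # e.g. {"status": "SFW", "results": [...]}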
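
A similar sketch for /classify_text/; the JSON body must match the TextInput Pydantic model, whose "text" field is inferred here from the data.text access in the handler:

import requests

# Hypothetical smoke test for the text endpoint; assumes TextInput
# declares a single "text" field (inferred from data.text above).
resp = requests.post(
    "http://localhost:7860/classify_text/",
    json={"text": "example message to classify"},
)
print(resp.json())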