Update app.py
app.py CHANGED
@@ -1,5 +1,4 @@
-from fastapi import FastAPI
-from flask import Flask, request, jsonify
+from fastapi import FastAPI, File, UploadFile
 from pydantic import BaseModel
 from transformers import pipeline
 from PIL import Image
@@ -7,15 +6,18 @@ import joblib
 import re
 import string
 import io
+import os
 import uvicorn
-from threading import Thread
 
-# …
-…
-…
+# Set Hugging Face Cache Directory (Fixes Permission Error)
+CACHE_DIR = "/tmp/hf_cache"
+os.makedirs(CACHE_DIR, exist_ok=True)
 
-# …
-…
+# Initialize FastAPI
+app = FastAPI()
+
+# Load NSFW Image Classification Model (with custom cache directory)
+pipe = pipeline("image-classification", model="LukeJacob2023/nsfw-image-detector", cache_dir=CACHE_DIR)
 
 # Load Toxic Text Classification Model
 try:
@@ -37,24 +39,23 @@ def preprocess_text(text):
     text = text.translate(str.maketrans('', '', string.punctuation)) # Remove punctuation
     return text.strip()
 
-# NSFW Image Classification API
-@app.…
-def classify_image():
-    …
-    …
-    …
-    …
-    …
-    …
-    …
-    classification_label = max(results, key=lambda x: x['score'])['label']
-    nsfw_labels = {"sexy", "porn", "hentai"}
-    nsfw_status = "NSFW" if classification_label in nsfw_labels else "SFW"
+# NSFW Image Classification API
+@app.post("/classify_image/")
+async def classify_image(file: UploadFile = File(...)):
+    try:
+        image = Image.open(io.BytesIO(await file.read()))
+        results = pipe(image)
+
+        classification_label = max(results, key=lambda x: x['score'])['label']
+        nsfw_labels = {"sexy", "porn", "hentai"}
+        nsfw_status = "NSFW" if classification_label in nsfw_labels else "SFW"
 
-    …
+        return {"status": nsfw_status, "results": results}
+    except Exception as e:
+        return {"error": str(e)}
 
-# Toxic Text Classification API
-@…
+# Toxic Text Classification API
+@app.post("/classify_text/")
 async def classify_text(data: TextInput):
     try:
         processed_text = preprocess_text(data.text)
@@ -65,14 +66,6 @@ async def classify_text(data: TextInput):
     except Exception as e:
         return {"error": str(e)}
 
-# …
-def run_flask():
-    app.run(host="0.0.0.0", port=5000)
-
-def run_fastapi():
-    uvicorn.run(api, host="0.0.0.0", port=8000)
-
+# Run FastAPI using Uvicorn (Hugging Face requires port 7860)
 if __name__ == "__main__":
-    …
-    Thread(target=run_fastapi).start()
-    …
+    uvicorn.run(app, host="0.0.0.0", port=7860)
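
For quick verification, here is a minimal client-side sketch of how the two endpoints in this commit could be called once the Space is serving on port 7860. The base URL and the image file name are placeholders; the request shapes follow the handlers in the diff: a multipart "file" upload for /classify_image/ and a JSON body with a "text" field for /classify_text/ (the TextInput model itself is not shown in this diff, but classify_text reads data.text).

# Minimal usage sketch. Assumptions: the Space is reachable at BASE_URL and
# "example.jpg" is any local test image; both are placeholders.
import requests

BASE_URL = "http://localhost:7860"  # replace with the deployed Space URL

# NSFW image check: multipart upload matching `file: UploadFile = File(...)`
with open("example.jpg", "rb") as f:
    image_response = requests.post(f"{BASE_URL}/classify_image/", files={"file": f})
print(image_response.json())  # e.g. {"status": "SFW", "results": [...]}

# Toxic text check: JSON body with a `text` field, matching the TextInput model
text_response = requests.post(f"{BASE_URL}/classify_text/", json={"text": "some sample text"})
print(text_response.json())

Dropping the Flask app and the thread-based dual-server setup in favour of a single uvicorn.run(app, host="0.0.0.0", port=7860) call lines up with the comment in the commit: Hugging Face Spaces expects the app to listen on port 7860.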