import os
from fastapi import FastAPI, File, UploadFile
from pydantic import BaseModel
from transformers import pipeline
from huggingface_hub import hf_hub_download
from PIL import Image
import joblib
import re
import string
import io
import uvicorn

# ✅ Set the Hugging Face cache to a writable directory (fixes permission errors on Spaces)
os.environ["TRANSFORMERS_CACHE"] = "/tmp"
os.environ["HF_HOME"] = "/tmp"

# ✅ Manually download the NSFW model to `/tmp`
try:
    # Pre-fetch the weights into the /tmp cache; the pipeline below reuses the cached files
    model_path = hf_hub_download(repo_id="LukeJacob2023/nsfw-image-detector", filename="pytorch_model.bin", cache_dir="/tmp")
    # cache_dir is forwarded to from_pretrained via model_kwargs
    pipe = pipeline("image-classification", model="LukeJacob2023/nsfw-image-detector", model_kwargs={"cache_dir": "/tmp"})
    print("✅ NSFW Model Loaded Successfully!")
except Exception as e:
    print(f"❌ Error Loading NSFW Model: {e}")
    raise SystemExit(1)

# ✅ Load the toxic text classification model
try:
    model = joblib.load("toxic_classifier.pkl")
    vectorizer = joblib.load("vectorizer.pkl")
    print("✅ Toxic Text Model & Vectorizer Loaded Successfully!")
except Exception as e:
    print(f"❌ Error Loading Toxic Text Model: {e}")
    raise SystemExit(1)

# ✅ Initialize FastAPI
app = FastAPI()

# 📌 Text Input Model
class TextInput(BaseModel):
    text: str

# 🔹 Text Preprocessing Function
def preprocess_text(text):
    text = text.lower()
    text = re.sub(r'\d+', '', text)  # Remove numbers
    text = text.translate(str.maketrans('', '', string.punctuation))  # Remove punctuation
    return text.strip()
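
# For example (illustrative): preprocess_text("You are SO dumb!!! 123") -> "you are so dumb"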

# 📌 NSFW Image Classification API
@app.post("/classify_image/")
async def classify_image(file: UploadFile = File(...)):
    try:
        # Convert to RGB so palette/RGBA uploads don't trip up the classifier
        image = Image.open(io.BytesIO(await file.read())).convert("RGB")
        results = pipe(image)

        classification_label = max(results, key=lambda x: x['score'])['label']
        nsfw_labels = {"sexy", "porn", "hentai"}
        nsfw_status = "NSFW" if classification_label in nsfw_labels else "SFW"

        return {"status": nsfw_status, "results": results}
    except Exception as e:
        return {"error": str(e)}

# 📌 Toxic Text Classification API
@app.post("/classify_text/")
async def classify_text(data: TextInput):
    try:
        processed_text = preprocess_text(data.text)
        text_vectorized = vectorizer.transform([processed_text])
        prediction = model.predict(text_vectorized)
        result = "Toxic" if prediction[0] == 1 else "Safe"
        return {"prediction": result}
    except Exception as e:
        return {"error": str(e)}

# ✅ Run FastAPI with Uvicorn (Hugging Face Spaces expects port 7860 by default)
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
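
# Equivalent CLI launch (illustrative; assumes this file is saved as app.py):
#   uvicorn app:app --host 0.0.0.0 --port 7860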