import os
import io
import re
import string

import joblib
import uvicorn
from fastapi import FastAPI, File, UploadFile
from pydantic import BaseModel
from transformers import pipeline
from PIL import Image

# ✅ Define Hugging Face Cache Directory
CACHE_DIR = "./hf_cache"
os.makedirs(CACHE_DIR, exist_ok=True)

# ✅ Initialize FastAPI
app = FastAPI()

# ✅ Load NSFW Image Classification Model with custom cache
# (cache_dir is forwarded via model_kwargs so from_pretrained uses CACHE_DIR)
pipe = pipeline(
    "image-classification",
    model="LukeJacob2023/nsfw-image-detector",
    model_kwargs={"cache_dir": CACHE_DIR},
)

# ✅ Load Toxic Text Classification Model
try:
    model = joblib.load("toxic_classifier.pkl")
    vectorizer = joblib.load("vectorizer.pkl")
    print("✅ Model & Vectorizer Loaded Successfully!")
except Exception as e:
    print(f"❌ Error: {e}")
    exit(1)


# 📌 Text Input Data Model
class TextInput(BaseModel):
    text: str


# 🔹 Text Preprocessing Function
def preprocess_text(text):
    text = text.lower()
    text = re.sub(r'\d+', '', text)  # Remove numbers
    text = text.translate(str.maketrans('', '', string.punctuation))  # Remove punctuation
    return text.strip()


# 📌 NSFW Image Classification API
@app.post("/classify_image/")
async def classify_image(file: UploadFile = File(...)):
    try:
        image = Image.open(io.BytesIO(await file.read()))
        results = pipe(image)
        classification_label = max(results, key=lambda x: x['score'])['label']
        nsfw_labels = {"sexy", "porn", "hentai"}
        nsfw_status = "NSFW" if classification_label in nsfw_labels else "SFW"
        return {"status": nsfw_status, "results": results}
    except Exception as e:
        return {"error": str(e)}


# 📌 Toxic Text Classification API
@app.post("/classify_text/")
async def classify_text(data: TextInput):
    try:
        processed_text = preprocess_text(data.text)
        text_vectorized = vectorizer.transform([processed_text])
        prediction = model.predict(text_vectorized)
        result = "Toxic" if prediction[0] == 1 else "Safe"
        return {"prediction": result}
    except Exception as e:
        return {"error": str(e)}


# ✅ Run FastAPI on Hugging Face
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
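

# 🔎 Example client — a minimal illustrative sketch only, never called by the app.
# Assumptions not taken from the app above: the server is reachable at
# http://localhost:7860, the `requests` package is installed, and "test.jpg"
# is a placeholder path for any local image.
def _example_client():
    import requests

    # NSFW image check: multipart upload; the form field must be named "file"
    # to match the UploadFile parameter of /classify_image/.
    with open("test.jpg", "rb") as f:
        image_response = requests.post(
            "http://localhost:7860/classify_image/", files={"file": f}
        )
    print(image_response.json())  # e.g. {"status": "SFW", "results": [...]}

    # Toxicity check: JSON body matching the TextInput model of /classify_text/.
    text_response = requests.post(
        "http://localhost:7860/classify_text/", json={"text": "you are awesome"}
    )
    print(text_response.json())  # e.g. {"prediction": "Safe"}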