vaibhaviiii28 committed on
Commit 6af3c98 · verified
1 Parent(s): d44a6c1

Delete app0.py

Files changed (1)
  1. app0.py +0 -71
app0.py DELETED
@@ -1,71 +0,0 @@
- import os
- from fastapi import FastAPI, File, UploadFile
- from pydantic import BaseModel
- from transformers import pipeline
- from PIL import Image
- import joblib
- import re
- import string
- import io
- import uvicorn
-
- # ✅ Define Hugging Face Cache Directory
- CACHE_DIR = "./hf_cache"
- os.makedirs(CACHE_DIR, exist_ok=True)
-
- # ✅ Initialize FastAPI
- app = FastAPI()
-
- # ✅ Load NSFW Image Classification Model with custom cache
- pipe = pipeline("image-classification", model="LukeJacob2023/nsfw-image-detector", cache_dir=CACHE_DIR)
-
- # ✅ Load Toxic Text Classification Model
- try:
-     model = joblib.load("toxic_classifier.pkl")
-     vectorizer = joblib.load("vectorizer.pkl")
-     print("✅ Model & Vectorizer Loaded Successfully!")
- except Exception as e:
-     print(f"❌ Error: {e}")
-     exit(1)
-
- # 📌 Text Input Data Model
- class TextInput(BaseModel):
-     text: str
-
- # 🔹 Text Preprocessing Function
- def preprocess_text(text):
-     text = text.lower()
-     text = re.sub(r'\d+', '', text)  # Remove numbers
-     text = text.translate(str.maketrans('', '', string.punctuation))  # Remove punctuation
-     return text.strip()
-
- # 📌 NSFW Image Classification API
- @app.post("/classify_image/")
- async def classify_image(file: UploadFile = File(...)):
-     try:
-         image = Image.open(io.BytesIO(await file.read()))
-         results = pipe(image)
-
-         classification_label = max(results, key=lambda x: x['score'])['label']
-         nsfw_labels = {"sexy", "porn", "hentai"}
-         nsfw_status = "NSFW" if classification_label in nsfw_labels else "SFW"
-
-         return {"status": nsfw_status, "results": results}
-     except Exception as e:
-         return {"error": str(e)}
-
- # 📌 Toxic Text Classification API
- @app.post("/classify_text/")
- async def classify_text(data: TextInput):
-     try:
-         processed_text = preprocess_text(data.text)
-         text_vectorized = vectorizer.transform([processed_text])
-         prediction = model.predict(text_vectorized)
-         result = "Toxic" if prediction[0] == 1 else "Safe"
-         return {"prediction": result}
-     except Exception as e:
-         return {"error": str(e)}
-
- # ✅ Run FastAPI on Hugging Face
- if __name__ == "__main__":
-     uvicorn.run(app, host="0.0.0.0", port=7860)
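For reference, the deleted app0.py served two POST endpoints, /classify_image/ and /classify_text/, via uvicorn on port 7860. A minimal client sketch, assuming the app is still running locally and using a hypothetical test image named sample.jpg:

import requests

BASE_URL = "http://localhost:7860"  # port taken from the file's __main__ block

# NSFW image classification: upload the file as multipart form data
with open("sample.jpg", "rb") as f:  # hypothetical test image
    resp = requests.post(f"{BASE_URL}/classify_image/", files={"file": f})
print(resp.json())  # e.g. {"status": "SFW", "results": [...]}

# Toxic text classification: send JSON matching the TextInput model
resp = requests.post(f"{BASE_URL}/classify_text/", json={"text": "sample input"})
print(resp.json())  # e.g. {"prediction": "Safe"}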