fahmiaziz98 committed · Commit db2db2a · 1 Parent(s): 4ca551f
frist commit
- app.py +39 -73
- requirements.txt +3 -0
- router/__init__.py +0 -0
- router/disaster.py +40 -0
- router/image_clf.py +39 -0
- router/sentiment.py +40 -0
- router/upload_image.py +8 -0
- scripts/__init__.py +0 -0
- scripts/data_model.py +28 -0
- scripts/s3.py +61 -0
- utils/__init__.py +0 -0
- utils/logger.py +13 -0
- utils/pipeline.py +20 -0
app.py
CHANGED
@@ -1,79 +1,45 @@
-from typing import Union
-
from fastapi import FastAPI
-from fastapi import Request
-
-


@app.get("/")
def read_root():
-    return {"Hello": "World"}
-
-
-
-@app.get("/items/{item_id}")
-def read_item(item_id: int, q: Union[str, None] = None):
-    return {"item_id": item_id, "q": q}
-
-
-@app.get("/get_sentiment/{text}")
-def get_sentiment(text: str, user_id: Union[str, None] = None):
-    return {"text": text,
-            "sentiment": "positive",
-            "user_id": user_id}
-
-@app.get("/get_sentiment_v2/{text}/{ip}")
-def get_sentiment_v2(text: str, ip: str, user_id: Union[str, None] = None):
-    return {"ip": ip,
-            "text": text,
-            "sentiment": "positive",
-            "user_id": user_id}
-
-
-@app.post("/get_twitter_sentiment")
-def get_twitter_sentiment(text: str, ip: str, user_id: Union[str, None] = None):
-    """
-    Analyze the sentiment of a given text from Twitter.
-
-    Parameters:
-    - text (str): The text to analyze.
-    - ip (str): The IP address of the user.
-    - user_id (Union[str, None]): Optional user ID for tracking.
-
-    Returns:
-    - A JSON object containing the IP, text, sentiment analysis result, and user ID.
-    """
-    return {
-        "ip": ip,
-        "text": text,
-        "sentiment": "normal", # Placeholder for sentiment analysis
-        "user_id": user_id
-    }
-
-@app.post("/get_twitter_sentiment_v2")
-async def get_twitter_sentiment_v2(request: Request):
-    """
-    Analyze the sentiment of a given text from Twitter asynchronously.
-
-    This endpoint accepts a JSON body with the following fields:
-    - text (str): The text to analyze.
-    - ip (str): The IP address of the user.
-    - user_id (Union[str, None]): Optional user ID for tracking.
-
-    Returns:
-    - A JSON object containing the IP, text, sentiment analysis result, and user ID.
-    """
-    data = await request.json()
-
-    text: str = data.get('text')
-    ip = data.get('ip')
-    user_id = data.get('user_id')
-
-    return {
-        "ip": ip,
-        "text": text,
-        "sentiment": "normal", # Placeholder for sentiment analysis
-        "user_id": user_id
-    }

from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+from scripts.s3 import download_model_from_s3
+from router.disaster import router as disaster_router
+from router.sentiment import router as sentiment_router
+from router.image_clf import router as image_router
+from utils.logger import logger
+
+
+app = FastAPI(
+    title="ML API",
+    description="ML API for sentiment analysis and image classification",
+    version="0.0.1",
+    openapi_url="/openapi.json"
+)
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+MODEL_PATH = "ml-models/"
+sentiment_model_path = "tinybert-sentiment-analysis/"
+disaster_model_path = "tinybert-disaster-tweet/"
+image_model_path = "vit-human-pose-classification/"
+
+logger.info("Ensuring models are downloaded...")
+download_model_from_s3(MODEL_PATH + sentiment_model_path, sentiment_model_path)
+download_model_from_s3(MODEL_PATH + disaster_model_path, disaster_model_path)
+download_model_from_s3(MODEL_PATH + image_model_path, image_model_path)
+logger.info("All models are ready.")


@app.get("/")
def read_root():
+    return {"Status": "Running"}
+

+app.include_router(disaster_router, prefix="/api/v1", tags=["Disaster"])
+app.include_router(sentiment_router, prefix="/api/v1", tags=["Sentiment"])
+app.include_router(image_router, prefix="/api/v1", tags=["Image"])
requirements.txt
CHANGED
@@ -1,2 +1,5 @@
+boto3
+torch
+transformers
 fastapi
 uvicorn[standard]
router/__init__.py
ADDED
File without changes
router/disaster.py
ADDED
@@ -0,0 +1,40 @@
+import os
+import time
+from typing import Union
+from fastapi import APIRouter
+from scripts.data_model import ClassificationInput, ClassificationOutput
+from utils.pipeline import load_model
+
+router = APIRouter()
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+MODEL_PATH = os.path.join(BASE_DIR, "ml_models", "tinybert-disaster-tweet/")
+
+@router.post(
+    "/disaster_classification",
+    response_model=ClassificationOutput,
+    summary="Disaster Classification",
+    description="Classify the disaster of a given text using a pre-trained model."
+)
+def disaster_classification(input: ClassificationInput) -> ClassificationOutput:
+    try:
+        pipe = load_model(MODEL_PATH)
+        start = time.time()
+        output = pipe(input.text)
+        end = time.time()
+        prediction_time = int((end-start)*1000)
+
+        labels = [x['label'] for x in output]
+        scores = [x['score'] for x in output]
+
+        return ClassificationOutput(
+            user_id=input.user_id,
+            text=input.text,
+            model_name="tinybert-disaster-tweet",
+            sentiment=labels,
+            score=scores,
+            prediction_time=prediction_time
+        )
+
+    except Exception as e:
+        return {"error": f"Failed to process text classification: {str(e)}"}, 500
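A quick way to exercise this endpoint is FastAPI's TestClient. The sketch below is illustrative only; it assumes httpx is installed (TestClient needs it), that the model files are already in ml_models/, and it uses made-up user_id/text values shaped like ClassificationInput:

# test_disaster.py -- hypothetical smoke test, not part of this commit
from fastapi.testclient import TestClient

from app import app  # importing app triggers the import-time S3 download

client = TestClient(app)

payload = {
    "user_id": "user-123",                            # hypothetical id
    "text": ["Wildfire is spreading near the city"],  # list of strings, per ClassificationInput
}
response = client.post("/api/v1/disaster_classification", json=payload)
print(response.status_code)
print(response.json())  # ClassificationOutput fields on success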
router/image_clf.py
ADDED
@@ -0,0 +1,39 @@
+import os
+import time
+from typing import Union
+from fastapi import APIRouter
+from scripts.data_model import ImageInput, ImageOutput
+from utils.pipeline import load_model
+
+router = APIRouter()
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+MODEL_PATH = os.path.join(BASE_DIR, "ml_models", "vit-human-pose-classification/")
+
+@router.post(
+    "/image_classification",
+    response_model=ImageOutput,
+    summary="Image Classification",
+    description="Classify the image using a pre-trained model."
+)
+def image_classification(input: ImageInput) -> ImageOutput:
+    try:
+        pipe = load_model(MODEL_PATH, is_image_model=True)
+        start = time.time()
+        output = pipe(input.url)
+        end = time.time()
+        prediction_time = int((end-start)*1000)
+
+        labels_and_scores = [{"label": x['label'], "score": x['score']} for x in output]
+
+        return ImageOutput(
+            user_id=input.user_id,
+            url=input.url,
+            model_name="vit-human-pose-classification",
+            labels=[x['label'] for x in labels_and_scores],
+            scores=[x['score'] for x in labels_and_scores],
+            prediction_time=prediction_time
+        )
+
+    except Exception as e:
+        return {"error": f"Failed to process text classification: {str(e)}"}, 500
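One thing to watch: ImageOutput in scripts/data_model.py declares the fields label and score, while the call above passes labels= and scores=, so Pydantic will reject the response as written and the error falls into the except branch. A minimal sketch of a constructor call that matches the model as declared (all values here are made up):

# sketch only -- field names follow ImageOutput as defined in scripts/data_model.py
from scripts.data_model import ImageOutput

out = ImageOutput(
    user_id="user-123",                         # hypothetical
    url=["https://example.com/pose.jpg"],       # hypothetical
    model_name="vit-human-pose-classification",
    label=["standing"],                         # `label`, not `labels`
    score=[0.97],                               # `score`, not `scores`
    prediction_time=42,
)
print(out)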
router/sentiment.py
ADDED
@@ -0,0 +1,40 @@
+import os
+import time
+from typing import Union
+from fastapi import APIRouter
+from scripts.data_model import ClassificationInput, ClassificationOutput
+from utils.pipeline import load_model
+
+router = APIRouter()
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+MODEL_PATH = os.path.join(BASE_DIR, "ml_models", "tinybert-sentiment-analysis/")
+
+@router.post(
+    "/sentiment_classification",
+    response_model=ClassificationOutput,
+    summary="Sentiment Classification",
+    description="Classify the sentiment of a given text using a pre-trained model."
+)
+def sentiment_classification(input: ClassificationInput) -> ClassificationOutput:
+    try:
+        pipe = load_model(MODEL_PATH)
+        start = time.time()
+        output = pipe(input.text)
+        end = time.time()
+        prediction_time = int((end-start)*1000)
+
+        labels = [x['label'] for x in output]
+        scores = [x['score'] for x in output]
+
+        return ClassificationOutput(
+            user_id=input.user_id,
+            text=input.text,
+            model_name="tinybert-sentiment-analysis",
+            sentiment=labels,
+            score=scores,
+            prediction_time=prediction_time
+        )
+
+    except Exception as e:
+        return {"error": f"Failed to process text classification: {str(e)}"}, 500
router/upload_image.py
ADDED
@@ -0,0 +1,8 @@
+from fastapi import APIRouter
+from fastapi.responses import FileResponse
+
+# router = APIRouter()
+
+# @router.get("/image/{image_name}")
+# async def get_image(image_name: str):
+#     return FileResponse(f"image/{image_name}")
scripts/__init__.py
ADDED
File without changes
scripts/data_model.py
ADDED
@@ -0,0 +1,28 @@
+from pydantic import BaseModel
+from typing import List
+
+
+class ClassificationInput(BaseModel):
+    user_id: str
+    text: List[str]
+
+class ImageInput(BaseModel):
+    user_id: str
+    url: List[str]
+
+
+class ClassificationOutput(BaseModel):
+    user_id: str
+    text: List[str]
+    model_name: str
+    sentiment: List[str]
+    score: List[float]
+    prediction_time: int
+
+class ImageOutput(BaseModel):
+    user_id: str
+    url: List[str]
+    model_name: str
+    label: List[str]
+    score: List[float]
+    prediction_time: int
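The two input models above define the JSON bodies the routers expect. A short sketch of building them in code and dumping them to JSON (placeholder values):

# sketch only -- request-body shapes implied by the models above
from scripts.data_model import ClassificationInput, ImageInput

text_req = ClassificationInput(user_id="user-123", text=["great product", "terrible service"])
image_req = ImageInput(user_id="user-123", url=["https://example.com/pose.jpg"])

# Pydantic v2 API; on Pydantic v1 use .json() instead of .model_dump_json()
print(text_req.model_dump_json())
print(image_req.model_dump_json())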
scripts/s3.py
ADDED
@@ -0,0 +1,61 @@
+import os
+import boto3
+from pathlib import Path
+from utils import logger
+
+aws_access_key = os.getenv("AWS_ACCESS_KEY_ID") #"AKIA2ZIOMY3VPQVDJNDG"
+aws_key_pw = os.getenv("AWS_SECRET_ACCESS_KEY") #"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"
+BUCKET_NAME = os.getenv("BUCKET_NAME") #"mlops-tiny-bert-19032025"
+s3 = boto3.client(
+    "s3",
+    aws_access_key_id=aws_access_key,
+    aws_secret_access_key=aws_key_pw,
+)
+
+
+def download_model_from_s3(local_path: Path, s3_prefix: str):
+    try:
+        if os.path.exists(local_path) and os.listdir(local_path):
+            logger.info(f"Model {local_path} already exists. Skipping download.")
+            return
+
+        logger.info(f"Downloading model from S3: {s3_prefix} to {local_path}")
+        os.makedirs(local_path, exist_ok=True)
+        paginator = s3.get_paginator("list_objects_v2")
+
+        for result in paginator.paginate(Bucket=BUCKET_NAME, Prefix=s3_prefix):
+            if "Contents" in result:
+                for key in result["Contents"]:
+                    s3_key = key["Key"]
+                    local_file = os.path.join(local_path, os.path.relpath(s3_key, s3_prefix))
+
+                    os.makedirs(os.path.dirname(local_file), exist_ok=True)
+                    logger.info(f"Downloading {s3_key} to {local_file}")
+                    s3.download_file(BUCKET_NAME, s3_key, local_file)
+                    logger.info(f"Downloaded {s3_key} to {local_file}")
+    except Exception as e:
+        logger.error(f"Failed to download model from S3: {e}")
+        raise RuntimeError(f"Error downloading model from S3: {e}")
+
+def upload_image_to_s3(
+    file_name,
+    s3_prefix="ml-images",
+    object_name=None
+):
+    if object_name is None:
+        object_name = os.path.basename(file_name)
+
+    object_name = f"{s3_prefix}/{object_name}"
+    s3.upload_file(file_name, BUCKET_NAME, object_name)
+    logger.info(f"Uploaded {file_name} to s3://{BUCKET_NAME}/{object_name}")
+
+    response = s3.generate_presigned_url(
+        'get_object',
+        Params={
+            "Bucket": BUCKET_NAME,
+            "Key": object_name
+        },
+        ExpiresIn=3600
+    )
+    logger.info(f"Generated presigned URL for {object_name}: {response}")
+    return response
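The argument order matters here: download_model_from_s3 takes the local destination first and the S3 key prefix second, which is how app.py calls it. A minimal sketch, assuming AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and BUCKET_NAME are set and that the bucket actually holds a tinybert-sentiment-analysis/ prefix:

# sketch only -- illustrates the (local_path, s3_prefix) call order
from scripts.s3 import download_model_from_s3, upload_image_to_s3

# mirrors the app.py call: download into ml-models/tinybert-sentiment-analysis/
download_model_from_s3("ml-models/tinybert-sentiment-analysis/", "tinybert-sentiment-analysis/")

# upload a local file and get back a presigned URL valid for one hour
url = upload_image_to_s3("example.jpg")  # hypothetical local file
print(url)

Two details worth noting: the routers read from ml_models/ (underscore) while app.py downloads into ml-models/ (hyphen), and "from utils import logger" binds the utils.logger module rather than the logger object inside it, unlike the "from utils.logger import logger" form used in app.py.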
utils/__init__.py
ADDED
File without changes
utils/logger.py
ADDED
@@ -0,0 +1,13 @@
+import logging
+
+def get_logger(name: str):
+    logger = logging.getLogger(name)
+    if not logger.handlers:
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+    logger.setLevel(logging.INFO)
+    return logger
+
+logger = get_logger(__name__)
utils/pipeline.py
ADDED
@@ -0,0 +1,20 @@
+import torch
+from transformers import pipeline, AutoImageProcessor
+
+# Set the device to GPU if available, otherwise use CPU
+device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
+def load_model(local_path, is_image_model=False):
+    """
+    Load a model from the specified local path.
+
+    Args:
+        local_path (str): The local path to the model.
+        is_image_model (bool): Flag indicating if the model is an image model.
+    Returns:
+        pipeline: The loaded model pipeline.
+    """
+    if is_image_model:
+        image_processor = AutoImageProcessor.from_pretrained(local_path, use_fast=True)
+        return pipeline("image-classification", model=local_path, device=device, image_processor=image_processor)
+    return pipeline("text-classification", model=local_path, device=device)