fahmiaziz98 committed · commit c034083 · 1 parent: a60e140

frist commit
Files changed:
- router/disaster.py +10 -1
- router/image_clf.py +14 -12
- router/sentiment.py +10 -1
- router/upload_image.py +0 -8
- scripts/data_model.py +3 -3
- scripts/s3.py +22 -0
- utils/log.py +9 -0
router/disaster.py CHANGED
@@ -1,6 +1,5 @@
 import os
 import time
-from typing import Union
 from fastapi import APIRouter
 from scripts.data_model import ClassificationInput, ClassificationOutput
 from utils.pipeline import load_model
@@ -17,6 +16,16 @@ MODEL_PATH = os.path.join(BASE_DIR, "ml-models", "tinybert-disaster-tweet/")
     description="Classify the disaster of a given text using a pre-trained model."
 )
 def disaster_classification(input: ClassificationInput)-> ClassificationOutput:
+    """
+    Classify the disaster of a given text using a pre-trained model.
+
+    Args:
+        input (ClassificationInput): The input data containing the user_id and text.
+
+    Returns:
+        ClassificationOutput: The output data containing the user_id, text, model_name, sentiment, score, and prediction_time.
+
+    """
     try:
         pipe = load_model(MODEL_PATH)
         start = time.time()
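The handler body past `start = time.time()` is outside this hunk, so the following is only a hedged sketch of the text-classification pipeline shape such an endpoint typically consumes; it assumes utils.pipeline.load_model wraps a Hugging Face pipeline, and the stand-in model name is illustrative, not the repo's tinybert-disaster-tweet checkpoint.

# Hedged sketch: none of this is shown in the diff itself.
from transformers import pipeline

pipe = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-sst-2-english",  # illustrative stand-in model
)

# A single string returns a list with one {'label', 'score'} dict, which is
# the shape an endpoint like this would map onto ClassificationOutput.
result = pipe("Wildfire spreading fast near the highway, stay away")
print(result)  # e.g. [{'label': 'NEGATIVE', 'score': 0.98}]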
router/image_clf.py CHANGED
@@ -1,13 +1,8 @@
 import os
 import time
-import requests
-from PIL import Image
-from io import BytesIO
-
 from fastapi import APIRouter, HTTPException
 from scripts.data_model import ImageInput, ImageOutput
 from utils.pipeline import load_model
-from utils.log import logger
 
 router = APIRouter()
 
@@ -21,20 +16,27 @@ MODEL_PATH = os.path.join(BASE_DIR, "ml-models", "vit-human-pose-classification/
     description="Classify the image using a pre-trained model."
 )
 def image_classification(input: ImageInput)-> ImageOutput:
+    """
+    Classify the image using a pre-trained model.
+
+    Args:
+        input (ImageInput): The input data containing the image URL and user ID.
+
+    Returns:
+        ImageOutput: The output data containing the labels, scores, prediction time, and other info.
+    """
     try:
         pipe = load_model(MODEL_PATH, is_image_model=True)
-
-
-        logger.info(f"Image URLs: {image[-1]}")
+        urls = [str(x) for x in input.url]
 
         start = time.time()
-        output = pipe(
+        output = pipe(urls)
         end = time.time()
 
         prediction_time = int((end-start)*1000)
 
-        labels = [x[
-        scores = [x[
+        labels = [x[0]['label'] for x in output]
+        scores = [x[0]['score'] for x in output]
 
         return ImageOutput(
             user_id=input.user_id,
@@ -46,4 +48,4 @@ def image_classification(input: ImageInput)-> ImageOutput:
         )
 
     except Exception as e:
-        raise HTTPException(status_code=500, detail=f"Failed to process image classification: {e}")
+        raise HTTPException(status_code=500, detail=f"Failed to process image classification: {e}")
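The new `labels`/`scores` comprehensions index `[0]` because, given a list of inputs, an image-classification pipeline returns one list of `{'label', 'score'}` dicts per image, sorted best-first. A hedged sketch of that shape follows; the ViT checkpoint and URL are illustrative, not the repo's vit-human-pose-classification model.

# Hedged sketch of the output shape the comprehensions rely on.
from transformers import pipeline

pipe = pipeline("image-classification", model="google/vit-base-patch16-224")  # stand-in model

urls = ["https://example.com/cat.jpg"]  # hypothetical URL; HttpUrl values are cast to str first
output = pipe(urls)                     # per image: a list of {'label', 'score'} dicts, best first

labels = [x[0]["label"] for x in output]  # top-1 label per image
scores = [x[0]["score"] for x in output]  # top-1 score per image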
router/sentiment.py CHANGED
@@ -1,6 +1,5 @@
 import os
 import time
-from typing import Union
 from fastapi import APIRouter
 from scripts.data_model import ClassificationInput, ClassificationOutput
 from utils.pipeline import load_model
@@ -17,6 +16,16 @@ MODEL_PATH = os.path.join(BASE_DIR, "ml-models", "tinybert-sentiment-analysis/")
     description="Classify the sentiment of a given text using a pre-trained model."
 )
 def sentiment_classification(input: ClassificationInput)-> ClassificationOutput:
+    """
+    Classify the sentiment of a given text using a pre-trained model.
+
+    Args:
+        input (ClassificationInput): The input data containing the user_id and text.
+
+    Returns:
+        ClassificationOutput: The output data containing the user_id, text, model_name, sentiment, score, and prediction_time.
+
+    """
     try:
         pipe = load_model(MODEL_PATH)
         start = time.time()
router/upload_image.py DELETED
@@ -1,8 +0,0 @@
-from fastapi import APIRouter
-from fastapi.responses import FileResponse
-
-# router = APIRouter()
-
-# @router.get("/image/{image_name}")
-# async def get_image(image_name: str):
-#     return FileResponse(f"image/{image_name}")
scripts/data_model.py CHANGED
@@ -1,4 +1,4 @@
-from pydantic import BaseModel
+from pydantic import BaseModel, HttpUrl
 from typing import List
 
 
@@ -8,7 +8,7 @@ class ClassificationInput(BaseModel):
 
 class ImageInput(BaseModel):
     user_id: str
-    url: List[
+    url: List[HttpUrl]
 
 
 class ClassificationOutput(BaseModel):
@@ -21,7 +21,7 @@ class ClassificationOutput(BaseModel):
 
 class ImageOutput(BaseModel):
     user_id: str
-    url: List[
+    url: List[HttpUrl]
     model_name: str
     label: List[str]
    score: List[float]
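Switching `url` to `List[HttpUrl]` makes pydantic reject malformed URLs at request time; depending on the pydantic version the parsed values are not plain strings, which is why router/image_clf.py now converts them with `str(x)` before calling the pipeline. A small illustrative check, with made-up values:

# Illustrative check of the new HttpUrl field.
from pydantic import ValidationError
from scripts.data_model import ImageInput

inp = ImageInput(user_id="u1", url=["https://example.com/cat.jpg"])
urls = [str(x) for x in inp.url]  # plain strings for the pipeline

try:
    ImageInput(user_id="u1", url=["not-a-url"])
except ValidationError as e:
    print(e)  # rejected before the endpoint body ever runs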
scripts/s3.py CHANGED
@@ -14,6 +14,16 @@ s3 = boto3.client(
 )
 
 def download_model_from_s3(local_path: str, s3_prefix: str):
+    """
+    Downloads a model from S3 to the specified local path.
+
+    Args:
+        local_path (str): The local path to download the model to.
+        s3_prefix (str): The S3 prefix of the model to download.
+
+    Raises:
+        RuntimeError: If there is an error downloading the model from S3.
+    """
     try:
         if os.path.exists(local_path) and os.listdir(local_path):
             logger.info(f"Model {local_path} already exists. Skipping download.")
@@ -41,6 +51,18 @@ def upload_image_to_s3(
     s3_prefix="ml-images",
     object_name=None
 ):
+    """
+    Uploads an image to S3 and returns a presigned URL for the object.
+
+    Args:
+        file_name (str): The file name of the image to upload.
+        s3_prefix (str): The S3 prefix to use for the object name.
+        object_name (str, optional): The object name to use for the S3 key.
+            If not provided, the object name will be the same as the file name.
+
+    Returns:
+        str: The presigned URL for the S3 object.
+    """
     if object_name is None:
         object_name = os.path.basename(file_name)
 
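The added docstrings describe behaviour whose bodies sit outside these hunks (download by S3 prefix, upload plus a presigned URL). For orientation only, a minimal boto3 sketch of the upload half; the bucket name and helper name are placeholders, not the repo's actual implementation.

# Hedged sketch of "upload and return a presigned URL" with boto3.
import boto3

s3 = boto3.client("s3")
BUCKET = "my-ml-bucket"  # placeholder, not the project's real bucket

def upload_image_sketch(file_name: str, object_name: str) -> str:
    s3.upload_file(file_name, BUCKET, object_name)  # push the local file to S3
    # A presigned GET URL lets clients fetch the image without AWS credentials.
    return s3.generate_presigned_url(
        "get_object",
        Params={"Bucket": BUCKET, "Key": object_name},
        ExpiresIn=3600,  # valid for one hour
    )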
utils/log.py CHANGED
@@ -1,6 +1,15 @@
 import logging
 
 def get_logger(name: str):
+    """
+    Gets a logger with the given name, and configures it if it has never been configured before.
+
+    Args:
+        name (str): The name of the logger to get.
+
+    Returns:
+        A configured logger with the given name.
+    """
     logger = logging.getLogger(name)
     if not logger.handlers:
         handler = logging.StreamHandler()
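Only the first lines of `get_logger` appear in this hunk; below is a typical completion and usage, shown purely for illustration. The formatter string and log level are assumptions, not taken from the repo.

# Hedged sketch of how such a get_logger is usually finished and used.
import logging

def get_logger_sketch(name: str) -> logging.Logger:
    logger = logging.getLogger(name)
    if not logger.handlers:  # configure once; later calls reuse the same handler
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger

logger = get_logger_sketch(__name__)
logger.info("model loaded")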