Commit b7100d8 · Parent(s): e915023
Update main.py

main.py CHANGED
@@ -3,53 +3,95 @@ import pandas as pd
 import transformers as pipeline
 from transformers import AutoTokenizer,AutoModelForSequenceClassification
 from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
-import os
-#os.environ["TRANSFORMERS_CACHE"] = "/path/to/writable/cache/directory"
+# import os
+# #os.environ["TRANSFORMERS_CACHE"] = "/path/to/writable/cache/directory"


-model_name = "Sonny4Sonnix/Movie_Sentiments_Analysis_with_FastAPI" # Replace with the name of the pre-trained model you want to use
+from fastapi import FastAPI, HTTPException, Query
+
+import transformers
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
+
+model_name = "Sonny4Sonnix/twitter-roberta-base-sentimental-analysis-of-covid-tweets"
 model = AutoModelForSequenceClassification.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)

+sentiment = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
+
 app = FastAPI()

 @app.get("/")
 async def read_root():
-    return {"message": "Welcome to the Sepsis Prediction using FastAPI"}
+    return {"message": "Sentiment Analysis API using FastAPI"}

-def classify(prediction):
-    if prediction == 0:
-        return "Sentence is positive"
-    else:
-        return "Sentence is negative"
+@app.get("/analyze-sentiment/")
+async def analyze_sentiment(text: str = Query(..., description="Text for sentiment analysis")):
+    result = sentiment(text)
+    sentiment_label = result[0]['label']
+    sentiment_score = result[0]['score']

-
-@app.post("/predict/")
-async def predict_sepsis(
-    request: Request,
-    Text: float = Query(..., description="Please type a sentence"),
-):
-
-    input_data = [Text]
-
-    input_df = pd.DataFrame([input_data], columns=[
-        "Text"
-    ])
-
-    pred = model.predict(input_df)
-    output = classify(pred[0])
+    if sentiment_label == 'LABEL_1':
+        sentiment_label = "positive"
+    elif sentiment_label == 'LABEL_0':
+        sentiment_label = "neutral"
+    else:
+        sentiment_label = "negative"

     response = {
-        "prediction": output
+        "sentiment": sentiment_label.capitalize(),
+        "score": sentiment_score
     }

     return response

-
-if __name__ == "__main__":
+if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="127.0.0.1", port=7860)
-
+
+
+# model_name = "Sonny4Sonnix/Movie_Sentiments_Analysis_with_FastAPI" # Replace with the name of the pre-trained model you want to use
+# model = AutoModelForSequenceClassification.from_pretrained(model_name)
+# tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+# app = FastAPI()
+
+# @app.get("/")
+# async def read_root():
+#     return {"message": "Welcome to the Sepsis Prediction using FastAPI"}
+
+# def classify(prediction):
+#     if prediction == 0:
+#         return "Sentence is positive"
+#     else:
+#         return "Sentence is negative"
+
+
+# @app.post("/predict/")
+# async def predict_sepsis(
+#     request: Request,
+#     Text: float = Query(..., description="Please type a sentence"),
+# ):
+
+#     input_data = [Text]
+
+#     input_df = pd.DataFrame([input_data], columns=[
+#         "Text"
+#     ])
+
+#     pred = model.predict(input_df)
+#     output = classify(pred[0])
+
+#     response = {
+#         "prediction": output
+#     }
+
+#     return response
+
+# # Run the app using Uvicorn
+# if __name__ == "__main__":
+#     import uvicorn
+#     uvicorn.run(app, host="127.0.0.1", port=7860)
+# sentiment = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
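
For reference, a minimal client sketch of how the new /analyze-sentiment/ endpoint added in this commit could be exercised once main.py is running locally. Assumptions not taken from the diff: uvicorn is serving on 127.0.0.1:7860 as in the __main__ block above, the requests package is installed, and the sample sentence is made up.

# Hypothetical client call; the URL and query parameter name come from the diff,
# the sample sentence and the running local server are assumptions.
import requests

resp = requests.get(
    "http://127.0.0.1:7860/analyze-sentiment/",
    params={"text": "I finally got my vaccine appointment today!"},
)
resp.raise_for_status()
data = resp.json()  # expected shape per the diff: {"sentiment": ..., "score": ...}
print(data["sentiment"], data["score"])

The endpoint already maps LABEL_0/LABEL_1 to a human-readable string before responding, so a client only needs the "sentiment" and "score" keys.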