import os

# Quote the accelerate spec so the shell does not treat ">" as a redirect
os.system('pip install -q flash_attn==2.7.4.post1 transformers==4.49.0 "accelerate>=0.26.0"')
import re

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Optional Hugging Face token from the Space environment (not used below)
hf_token = os.environ.get('hf_token')

model_path = 'microsoft/Phi-4-mini-instruct'
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    # device_map="auto",
    # torch_dtype="auto",
    trust_remote_code=True,
)
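# The tokenizer's chat template is applied by the pipeline when it receives
# a list of role/content messages.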
tokenizer = AutoTokenizer.from_pretrained(model_path)
def make_prompt(sentence):
    # Prepend the extraction instruction to the user's text
    prompt = ("""
Given the below sentence(s) can you extract the sentiment and keywords for each sentence:
""" + sentence
    )
    return prompt
def split_conj(text):
    # Split on contrastive conjunctions so each clause can be scored separately
    # (helper defined here but not wired into the interface)
    return re.sub('(but|yet|although|however|nevertheless|on the other hand|still|though)', "|", text).split('|')
def get_sentiment_from_llm(review_text):
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
    )
    generation_args = {
        "max_new_tokens": 500,
        "return_full_text": False,
        "do_sample": False,  # greedy decoding, so no temperature is needed
    }
    question_and_background = make_prompt(review_text)
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant who helps to extract sentiments and keywords from given sentences."},
        {"role": "user", "content": question_and_background},
    ]
    output = pipe(messages, **generation_args)
    print(output)
    return output[0]['generated_text']
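# Build the Gradio app: a single Interface exposed inside a tabbed layout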
demo = gr.Blocks()
sentiment_extr = gr.Interface(
    fn=get_sentiment_from_llm,
    inputs=gr.Textbox(label="Text input", type="text"),
    outputs=gr.Textbox(label="Sentiments", type="text"),
    title="Sentiment analysis and keywords extraction",
    description="""
    Enter one or two sentences in the Text input box and click "Submit" to see the sentiments extracted. <br>
    For longer input, please allow 2-3 minutes as the model runs on a small CPU. <br>
    Base model: Phi-4-mini-instruct from Microsoft. <br>
    Prompt tuned by Thuyen Truong for sentiment extraction.
    """,
)
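# Mount the interface in a tabbed container and launch the app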
with demo:
    gr.TabbedInterface([sentiment_extr], ["Sentiment text analysis"])

demo.launch()