import os

# Install pinned runtime dependencies at startup (a common Hugging Face Spaces pattern).
# The accelerate specifier is quoted so the shell does not treat ">=" as an output redirect.
os.system('pip install -q flash_attn==2.7.4.post1 transformers==4.49.0 "accelerate>=0.26.0"')

import re

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

hf_token = os.environ.get('hf_token')
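# NOTE: the token is read from the Space secrets but is not used elsewhere in this script (the model is public).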

model_path = 'microsoft/Phi-4-mini-instruct'

# Load the model on CPU; device_map/torch_dtype are left commented out because the Space runs on CPU hardware.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    # device_map="auto",
    # torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_path)

def make_prompt(sentence):
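    # Wrap the user's sentence(s) in the extraction instruction sent to the chat model.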
    prompt = ("""
    Given the below sentence(s) can you extract the sentiment and keywords for each sentence:
    
    """ + sentence
    )

    return prompt

def split_conj(text):
    # Split text into clauses on contrastive conjunctions (but, yet, however, ...).
    # Word boundaries (\b) prevent matches inside longer words; currently not wired into the Gradio flow.
    return re.sub(r'\b(but|yet|although|however|nevertheless|on the other hand|still|though)\b', "|", text).split('|')

def get_sentiment_from_llm(review_text):
    # Wrap the preloaded model/tokenizer in a text-generation pipeline
    # (no weights are reloaded here, so building it per request is inexpensive).
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
    )

    # Greedy decoding: do_sample=False makes the output deterministic.
    generation_args = {
        "max_new_tokens": 500,
        "return_full_text": False,
        "temperature": 0.0,
        "do_sample": False,
    }
    
    question_and_background = make_prompt(review_text)
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant who helps to extract sentiments and keywords from given sentences."},
        {"role": "user", "content": question_and_background}
    ]
    output = pipe(messages, **generation_args)
    print(output)  # log the raw pipeline output to the Space console
    return output[0]['generated_text']
    
demo = gr.Blocks()
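# Single Interface: free-form text in, extracted sentiments and keywords out.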
sentiment_extr = gr.Interface(
    fn=get_sentiment_from_llm,
    inputs=gr.Textbox(label="Text input", type="text"),
    outputs=gr.Textbox(label="Sentiments", type="text"),
    title="Sentiment analysis and keywords extraction",
    description="""
    Enter one or two sentences in the Text input box and click "Submit" to see the extracted sentiments and keywords. <br>
    For longer inputs, please allow 2-3 minutes as the model runs on a small CPU. <br>
    Base model: Phi-4-mini-instruct from Microsoft. <br>
    Prompt tuned by Thuyen Truong for sentiment extraction.
    """
)
with demo:
    gr.TabbedInterface([sentiment_extr], ["Sentiment text analysis"])
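# Start the Gradio server (no extra launch arguments are needed on Hugging Face Spaces).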
demo.launch()