thuyentruong committed
Commit d2482b4 · verified · 1 Parent(s): f41fbdd

Update app.py

Files changed (1)
app.py  +2 -29
app.py CHANGED
@@ -8,7 +8,7 @@ import torch
 
 hf_token = os.environ.get('hf_token')
 
-model_path='microsoft/Phi-4-mini-instruct'
+model_path= 'microsoft/Phi-4-mini-instruct-onnx' #'microsoft/Phi-4-mini-instruct'
 
 model = AutoModelForCausalLM.from_pretrained(
     model_path,
@@ -18,21 +18,6 @@ model = AutoModelForCausalLM.from_pretrained(
 )
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 
-Examples_to_teach_model="""
-Text: I hate apples
-Sentiment analysis:
-Sentiments: Negative
-PPrint Key words: hate, aples
-Text: I enjoy watching it
-Sentiment analysis:
-Sentiments: Positive
-PPrint Key words: enjoy
-Text: I'm tired of this long process
-Sentiment analysis:
-Sentiments: Negative
-PPrint Key words: tired, long process
-"""
-
 def make_prompt(sentence):
     prompt = ("""
     Given the below sentence(s) can you extract the sentiment and keywords for each sentence:
@@ -46,24 +31,12 @@ def split_conj(text):
     return re.sub('(but|yet|although|however|nevertheless|on the other hand|still|though)', "|", text).split('|')
 
 def get_sentiment_from_llm(review_text):
-    #sentences = review_text.lower().split(".")
-    """
-    segments=[]
-    for sen in sentences:
-        segments=segments+split_conj(sen)
-    ls_outputs=[]
-
-    segments= [x for x in segments if len(x)>=5]
-    print(segments)
-    """
 
     pipe = pipeline(
         "text-generation",
         model=model,
         tokenizer=tokenizer,
     )
-
-
 
     generation_args = {
         "max_new_tokens": 500,
@@ -79,7 +52,7 @@ def get_sentiment_from_llm(review_text):
     ]
     output = pipe(messages, **generation_args)
     print(output)
-    return output
+    return output[0]['generated_text']
 
 demo = gr.Blocks()
 sentiment_extr = gr.Interface(
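
The first hunk points model_path at the ONNX export of Phi-4-mini-instruct while the surrounding loading code still calls AutoModelForCausalLM.from_pretrained. As a hedged sketch that is not part of the commit: ONNX exports on the Hub are usually loaded through Optimum's ONNX Runtime wrapper; whether the -onnx repo's file layout matches what this loader expects is an assumption.

# Sketch only: loading an ONNX export via Optimum instead of plain transformers.
# model_path matches the commit; the repo layout this loader expects is assumed.
from optimum.onnxruntime import ORTModelForCausalLM
from transformers import AutoTokenizer

model_path = 'microsoft/Phi-4-mini-instruct-onnx'
model = ORTModelForCausalLM.from_pretrained(model_path)   # Optimum's ONNX Runtime loader
tokenizer = AutoTokenizer.from_pretrained(model_path)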
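
The other functional change is the return value. The transformers text-generation pipeline returns a list with one dict per input, keyed by 'generated_text', so returning the raw output would hand Gradio a list rather than the generated text itself. A minimal sketch of that shape, assuming the non-ONNX checkpoint and only a single generation argument (the Space's full generation_args are not shown in this diff):

# Minimal sketch of the output shape behind output[0]['generated_text'].
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_path = 'microsoft/Phi-4-mini-instruct'   # assumption: non-ONNX checkpoint for illustration
model = AutoModelForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

messages = [{"role": "user", "content": "Extract the sentiment: I enjoy watching it"}]
output = pipe(messages, max_new_tokens=500)

print(output)                        # [{'generated_text': ...}] -- a list with one dict per input
print(output[0]['generated_text'])   # the single result the app now returns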