joey1101 committed on
Commit
57c2123
·
verified ·
1 Parent(s): 02117b7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -21
app.py CHANGED
@@ -18,30 +18,48 @@ import sentencepiece # For tokenization (required by SpeechT5Processor)
18
  ##########################################
19
  # Streamlit application title and input
20
  ##########################################
21
- st.title("Just Comment") # Set the app title for user interface
22
- st.write("I'm listening to you, my friend") # Add a brief app description
23
- text = st.text_area("Enter your comment", "") # Text area for user to input their comment or feedback
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  ##########################################
26
  # Step 1: Sentiment Analysis Function
27
  ##########################################
28
  def analyze_dominant_emotion(user_review):
29
  """
30
- Analyze the dominant emotion in the user's comment using our fine-tuned text classification model.
31
  """
32
  emotion_classifier = pipeline(
33
  "text-classification",
34
  model="Thea231/jhartmann_emotion_finetuning",
35
  return_all_scores=True
36
- ) # Load our fine-tuned text classification model
37
-
38
- emotion_results = emotion_classifier(user_review)[0] # Get the emotion classification scores for the input text
39
- dominant_emotion = max(emotion_results, key=lambda x: x['score']) # Identify the emotion with the highest confidence
40
  return dominant_emotion # Return the dominant emotion (label and score)
41
 
 
42
  ##########################################
43
  # Step 2: Response Generation Function
44
  ##########################################
 
45
  def response_gen(user_review):
46
  """
47
  Generate a concise and logical response based on the sentiment of the user's comment.
@@ -115,26 +133,26 @@ def response_gen(user_review):
115
  )
116
  }
117
 
118
- # Select the appropriate prompt based on the user's emotion, or default to neutral
119
  prompt = emotion_prompts.get(
120
  emotion_label,
121
  f"Neutral feedback: '{user_review}'\n\nWrite a professional and concise response (50-200 words max).\n\nResponse:"
122
  )
123
-
124
- # Load the tokenizer and language model for text generation
125
  tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B") # Load tokenizer for processing text inputs
126
- model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B") # Load language model for response generation
127
 
128
  inputs = tokenizer(prompt, return_tensors="pt") # Tokenize the input prompt
129
  outputs = model.generate(
130
  **inputs,
131
- max_new_tokens=300, # Set the upper limit of tokens generated to ensure the response isn't too lengthy
132
- min_length=75, # Set the minimum length of the generated response
133
- no_repeat_ngram_size=2, # Avoid repeating phrases
134
- temperature=0.7 # Add slight randomness for natural-sounding responses
135
  )
136
 
137
- # Decode the generated response back into text
138
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
139
  print(f"Generated response: {response}") # Print the response for debugging
140
  return response # Return the generated response
@@ -161,10 +179,12 @@ def sound_gen(response):
161
  # Convert the spectrogram into an audio waveform using the vocoder
162
  with torch.no_grad():
163
  speech = vocoder(spectrogram)
164
-
165
  # Save the audio as a .wav file
166
  sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
167
- st.audio("customer_service_response.wav") # Embed an audio player in the web app
 
 
168
 
169
  ##########################################
170
  # Main Function
@@ -175,8 +195,11 @@ def main():
175
  """
176
  if text: # Check if the user has entered a comment
177
  response = response_gen(text) # Generate a logical and concise response
178
- st.write(f"I wanna tell you that: {response}") # Display the generated response in the Streamlit app
179
- sound_gen(response) # Convert the text response to speech and make it available for playback
 
 
 
180
 
181
  # Run the main function when the script is executed
182
  if __name__ == "__main__":
 
18
  ##########################################
19
  # Streamlit application title and input
20
  ##########################################
21
+ # Display a colorful, large title in a visually appealing font
22
+ st.markdown(
23
+ "<h1 style='text-align: center; color: #FF5733; font-size: 50px;'>Just Comment</h1>",
24
+ unsafe_allow_html=True
25
+ ) # Use HTML and CSS to set a custom title design
26
+
27
+ # Display a smaller, gentle and warm subtitle below the title
28
+ st.markdown(
29
+ "<h3 style='text-align: center; color: #5D6D7E; font-style: italic;'>I'm listening to you, my friend</h3>",
30
+ unsafe_allow_html=True
31
+ ) # Use HTML to add a friendly and soft-styled subtitle
32
+
33
+ # Add a well-designed text area for user input
34
+ text = st.text_area(
35
+ "Enter your comment",
36
+ placeholder="Type something here...",
37
+ height=150,
38
+ help="Write a comment you would like us to analyze and respond to!" # Provide a helpful tooltip
39
+ )
40
 
41
  ##########################################
42
  # Step 1: Sentiment Analysis Function
43
  ##########################################
44
  def analyze_dominant_emotion(user_review):
45
  """
46
+ Analyze the dominant emotion in the user's comment using a fine-tuned text classification model.
47
  """
48
  emotion_classifier = pipeline(
49
  "text-classification",
50
  model="Thea231/jhartmann_emotion_finetuning",
51
  return_all_scores=True
52
+ ) # Load the fine-tuned text classification model from Hugging Face
53
+
54
+ emotion_results = emotion_classifier(user_review)[0] # Perform sentiment analysis on the input text
55
+ dominant_emotion = max(emotion_results, key=lambda x: x['score']) # Identify the emotion with the highest confidence score
56
  return dominant_emotion # Return the dominant emotion (label and score)
57
 
58
+
59
  ##########################################
60
  # Step 2: Response Generation Function
61
  ##########################################
62
+
63
  def response_gen(user_review):
64
  """
65
  Generate a concise and logical response based on the sentiment of the user's comment.
 
133
  )
134
  }
135
 
136
+ # Select the appropriate prompt based on the user's emotion or default to neutral
137
  prompt = emotion_prompts.get(
138
  emotion_label,
139
  f"Neutral feedback: '{user_review}'\n\nWrite a professional and concise response (50-200 words max).\n\nResponse:"
140
  )
141
+
142
+ # Load the tokenizer and language model for response generation
143
  tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B") # Load tokenizer for processing text inputs
144
+ model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B") # Load language model for text generation
145
 
146
  inputs = tokenizer(prompt, return_tensors="pt") # Tokenize the input prompt
147
  outputs = model.generate(
148
  **inputs,
149
+ max_new_tokens=300, # Set an upper limit on token generation to ensure concise output
150
+ min_length=75, # Set a minimum length to ensure the response is complete
151
+ no_repeat_ngram_size=2, # Avoid repetitive phrases
152
+ temperature=0.7 # Add randomness for more natural responses
153
  )
154
 
155
+ # Decode the generated response back into readable text
156
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
157
  print(f"Generated response: {response}") # Print the response for debugging
158
  return response # Return the generated response
 
179
  # Convert the spectrogram into an audio waveform using the vocoder
180
  with torch.no_grad():
181
  speech = vocoder(spectrogram)
182
+
183
  # Save the audio as a .wav file
184
  sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
185
+
186
+ # Embed an auto-playing audio player in the web app
187
+ st.audio("customer_service_response.wav", start_time=0) # Allow audio playback with autoplay feature
188
 
189
  ##########################################
190
  # Main Function
 
195
  """
196
  if text: # Check if the user has entered a comment
197
  response = response_gen(text) # Generate a logical and concise response
198
+ st.markdown(
199
+ f"<p style='color:#2ECC71; font-size:20px;'>{response}</p>",
200
+ unsafe_allow_html=True
201
+ ) # Display the generated response in a cute, styled font
202
+ sound_gen(response) # Convert the response to speech and make it available for playback
203
 
204
  # Run the main function when the script is executed
205
  if __name__ == "__main__":