joey1101 committed
Commit b204d6b · verified · 1 Parent(s): 88adecc

Update app.py

Files changed (1): app.py (+135 -66)
app.py CHANGED
@@ -1,7 +1,7 @@
 ##########################################
 # Step 0: Import required libraries
 ##########################################
- import streamlit as st # For building the web application
 from transformers import (
     pipeline,
     SpeechT5Processor,
@@ -17,74 +17,141 @@ import soundfile as sf # For saving audio as .wav files
 ##########################################
 # Streamlit application title and input
 ##########################################
- st.title("Comment Reply for You") # Application title
- st.write("Generate automatic replies for user comments") # Application description
- text = st.text_area("Enter your comment", "") # Text input for user to enter comments

 ##########################################
 # Step 1: Sentiment Analysis Function
 ##########################################
 def analyze_dominant_emotion(user_review):
     """
-     Analyze the dominant emotion in the user's review using a text classification model.
     """
     emotion_classifier = pipeline(
         "text-classification",
         model="Thea231/jhartmann_emotion_finetuning",
         return_all_scores=True
-     ) # Load pre-trained emotion classification model

-     emotion_results = emotion_classifier(user_review)[0] # Get emotion scores for the review
-     dominant_emotion = max(emotion_results, key=lambda x: x['score']) # Find the emotion with the highest confidence
-     return dominant_emotion

 ##########################################
 # Step 2: Response Generation Function
 ##########################################
 def response_gen(user_review):
     """
-     Generate a response based on the sentiment of the user's review.
     """
-     # Use Llama-based model to create a response based on a generated prompt
-     dominant_emotion = analyze_dominant_emotion(user_review) # Get the dominant emotion
-     emotion_label = dominant_emotion['label'].lower() # Extract emotion label

-     # Define response templates for each emotion
-     emotion_prompts = {
-         "anger": (
-             "Customer complaint: '{review}'\n\n"
-             "As a customer service representative, write a response that:\n"
-             "- Sincerely apologizes for the issue\n"
-             "- Explains how the issue will be resolved\n"
-             "- Offers compensation where appropriate\n\n"
-             "Response:"
-         ),
-         "joy": (
-             "Customer review: '{review}'\n\n"
-             "As a customer service representative, write a positive response that:\n"
-             "- Thanks the customer for their feedback\n"
-             "- Acknowledges both positive and constructive comments\n"
-             "- Invites them to explore loyalty programs\n\n"
-             "Response:"
-         ),
-         # Add other emotions as needed...
-     }
-
-     # Format the prompt with the user's review
-     prompt = emotion_prompts.get(emotion_label, "Neutral").format(review=user_review)

-     # Load model directly
-     from transformers import AutoTokenizer, AutoModelForCausalLM

-     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
-     model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")
-
-     inputs = tokenizer(prompt, return_tensors="pt") # Tokenize the prompt
-     outputs = model.generate(**inputs, max_new_tokens=100) # Generate a response

-     input_length = inputs.input_ids.shape[1] # Length of the input text
-     response = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True) # Decode the generated text
-     return response

 ##########################################
 # Step 3: Text-to-Speech Conversion Function
@@ -93,26 +160,28 @@ def sound_gen(response):
     """
     Convert the generated response to speech and save as a .wav file.
     """
-     # Load the pre-trained TTS models
-     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
-     model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
-     vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
-
-     # Load speaker embeddings (e.g., neutral female voice)
-     embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
-     speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
-
-     # Process the input text and generate a spectrogram
     inputs = processor(text=response, return_tensors="pt")
     spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
-
-     # Use the vocoder to generate a waveform
     with torch.no_grad():
         speech = vocoder(spectrogram)

-     # Save the generated speech as a .wav file
     sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
-     st.audio("customer_service_response.wav") # Play the audio in Streamlit

 ##########################################
 # Main Function
@@ -121,11 +190,11 @@ def main():
     """
     Main function to orchestrate the workflow of sentiment analysis, response generation, and text-to-speech.
     """
-     if text: # Check if the user entered a comment
-         response = response_gen(text) # Generate a response
-         st.write(f"Generated response: {response}") # Display the generated response
-         sound_gen(response) # Convert the response to speech and play it

- # Run the main function
 if __name__ == "__main__":
-     main()
 
 ##########################################
 # Step 0: Import required libraries
 ##########################################
+ import streamlit as st # For building the web application interface
 from transformers import (
     pipeline,
     SpeechT5Processor,
 
 ##########################################
 # Streamlit application title and input
 ##########################################
+ st.title("Comment Reply for You") # Set the app title for the user interface
+ st.write("Generate automatic replies for user comments") # Add a brief app description
+ text = st.text_area("Enter your comment", "") # Text area for user to input their comment or feedback

 ##########################################
 # Step 1: Sentiment Analysis Function
 ##########################################
 def analyze_dominant_emotion(user_review):
     """
+     Analyze the dominant emotion in the user's comment using our fine-tuned text classification model.
     """
     emotion_classifier = pipeline(
         "text-classification",
         model="Thea231/jhartmann_emotion_finetuning",
         return_all_scores=True
+     ) # Load our fine-tuned text classification model

+     emotion_results = emotion_classifier(user_review)[0] # Get the emotion classification scores for the input text
+     dominant_emotion = max(emotion_results, key=lambda x: x['score']) # Identify the emotion with the highest confidence
+     return dominant_emotion # Return the dominant emotion (label and score)
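With `return_all_scores=True`, the `text-classification` pipeline returns one list of label/score dictionaries per input, which is what the `max(..., key=...)` call above selects from (newer `transformers` releases expose the same behaviour via `top_k=None`). A minimal sketch of the expected shape, with made-up scores, so the selection logic is easy to follow:

# Illustrative only: real labels and scores come from the fine-tuned classifier.
example_scores = [
    {"label": "anger", "score": 0.03},
    {"label": "joy", "score": 0.91},
    {"label": "sadness", "score": 0.06},
]
dominant = max(example_scores, key=lambda x: x["score"])
print(dominant["label"].lower())  # -> "joy", the key used to pick a prompt template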
 
 ##########################################
 # Step 2: Response Generation Function
 ##########################################
 def response_gen(user_review):
     """
+     Generate a concise and logical response based on the sentiment of the user's comment.
     """
+     dominant_emotion = analyze_dominant_emotion(user_review) # Get the dominant emotion of the user's comment
+     emotion_label = dominant_emotion['label'].lower() # Extract the emotion label in lowercase format

+     # Define response prompt templates for each emotion
+     emotion_strategies = {
+         "anger": {
+             "prompt": (
+                 "Customer complaint: '{review}'\n\n"
+                 "As a customer service representative, craft a professional response that:\n"
+                 "- Begins with sincere apology and acknowledgment\n"
+                 "- Clearly explains solution process with concrete steps\n"
+                 "- Offers appropriate compensation/redemption\n"
+                 "- Keeps tone humble and solution-focused (3-4 sentences)\n\n"
+                 "Response:"
+             )
+         },
+         "disgust": {
+             "prompt": (
+                 "Customer quality concern: '{review}'\n\n"
+                 "As a customer service representative, craft a response that:\n"
+                 "- Immediately acknowledges the product issue\n"
+                 "- Explains quality control measures being taken\n"
+                 "- Provides clear return/replacement instructions\n"
+                 "- Offers goodwill gesture (3-4 sentences)\n\n"
+                 "Response:"
+             )
+         },
+         "fear": {
+             "prompt": (
+                 "Customer safety concern: '{review}'\n\n"
+                 "As a customer service representative, craft a reassuring response that:\n"
+                 "- Directly addresses the safety worries\n"
+                 "- References relevant certifications/standards\n"
+                 "- Offers dedicated support contact\n"
+                 "- Provides satisfaction guarantee (3-4 sentences)\n\n"
+                 "Response:"
+             )
+         },
+         "joy": {
+             "prompt": (
+                 "Customer review: '{review}'\n\n"
+                 "As a customer service representative, craft a concise response that:\n"
+                 "- Specifically acknowledges both positive and constructive feedback\n"
+                 "- Briefly mentions loyalty/referral programs\n"
+                 "- Ends with shopping invitation (3-4 sentences)\n\n"
+                 "Response:"
+             )
+         },
+         "neutral": {
+             "prompt": (
+                 "Customer feedback: '{review}'\n\n"
+                 "As a customer service representative, craft a balanced response that:\n"
+                 "- Provides additional relevant product information\n"
+                 "- Highlights key service features\n"
+                 "- Politely requests more detailed feedback\n"
+                 "- Maintains professional tone (3-4 sentences)\n\n"
+                 "Response:"
+             )
+         },
+         "sadness": {
+             "prompt": (
+                 "Customer disappointment: '{review}'\n\n"
+                 "As a customer service representative, craft an empathetic response that:\n"
+                 "- Shows genuine understanding of the issue\n"
+                 "- Proposes personalized recovery solution\n"
+                 "- Offers extended support options\n"
+                 "- Maintains positive outlook (3-4 sentences)\n\n"
+                 "Response:"
+             )
+         },
+         "surprise": {
+             "prompt": (
+                 "Customer enthusiastic feedback: '{review}'\n\n"
+                 "As a customer service representative, craft a response that:\n"
+                 "- Matches customer's positive energy appropriately\n"
+                 "- Highlights unexpected product benefits\n"
+                 "- Invites to user community/events\n"
+                 "- Maintains brand voice (3-4 sentences)\n\n"
+                 "Response:"
+             )
+         }
+     }
+
+     # Select the appropriate prompt template based on the user's emotion, or fall back to a neutral one
+     prompt_template = emotion_strategies.get(emotion_label, {}).get(
+         "prompt",
+         "Neutral feedback: '{review}'\n\nWrite a professional and concise response (50-200 words max).\n\nResponse:"
+     )
+     prompt = prompt_template.format(review=user_review) # Insert the user's comment into the template

+     # Load the tokenizer and language model for text generation
+     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B") # Load tokenizer for processing text inputs
+     model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B") # Load language model for response generation
+
+     inputs = tokenizer(prompt, return_tensors="pt") # Tokenize the input prompt
+     outputs = model.generate(
+         **inputs,
+         max_new_tokens=300, # Set the upper limit of tokens generated to ensure the response isn't too lengthy
+         min_length=75, # Set the minimum length of the generated response
+         no_repeat_ngram_size=2, # Avoid repeating phrases
+         temperature=0.7 # Add slight randomness for natural-sounding responses
+     )
+
+     # Decode the generated response back into text
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     print(f" {response}") # Debug print statement for generated text
+     return response # Return the generated response
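Two behavioural notes on the generation call above. First, in `transformers`, `temperature` only affects decoding when sampling is enabled; with the default greedy decoding it is ignored (and recent versions warn about it). Second, unlike the previous version of this function, `tokenizer.decode(outputs[0], ...)` decodes the prompt together with the completion, so the displayed reply will begin with the prompt text. A minimal, self-contained sketch that enables sampling and strips the prompt; the sample prompt and parameter values are illustrative, not part of this commit:

# Illustrative sketch: sampling enabled, prompt tokens removed before decoding.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")

inputs = tokenizer("Customer review: 'Great product!'\n\nResponse:", return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=100,
    do_sample=True,          # Sampling must be on for temperature to take effect
    temperature=0.7,         # Softens the next-token distribution
    no_repeat_ngram_size=2,  # Avoid repeating phrases
)
prompt_length = inputs.input_ids.shape[1]  # Number of prompt tokens
reply = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)  # Keep only the new tokens
print(reply)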
 
 ##########################################
 # Step 3: Text-to-Speech Conversion Function

     """
     Convert the generated response to speech and save as a .wav file.
     """
+     # Load the pre-trained TTS models for speech synthesis
+     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts") # Pre-trained processor for TTS
+     model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts") # Pre-trained TTS model
+     vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan") # Vocoder for generating waveforms
+
+     # Load a neutral female voice embedding from a pre-trained dataset
+     embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation") # Load speaker embeddings
+     speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0) # Use a default speaker embedding
+
+     # Process the input text and create a speech spectrogram
     inputs = processor(text=response, return_tensors="pt")
     spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
+
+     # Convert the spectrogram into an audio waveform using the vocoder
     with torch.no_grad():
         speech = vocoder(spectrogram)

+     # Save the audio as a .wav file
     sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
+     # Play the generated audio in the Streamlit app
+     st.audio("customer_service_response.wav") # Embed an audio player in the web app
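A small note on the synthesis step: SpeechT5 produces 16 kHz audio, which is why the file is written with `samplerate=16000`, and `generate_speech` can also be handed the vocoder directly instead of calling it manually. A self-contained sketch of the same pipeline outside Streamlit; the sample sentence is a placeholder:

# Standalone sketch of the TTS path (same models and speaker embedding as the app).
import torch
import soundfile as sf
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
tts_model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

# x-vector speaker embedding used by the app (index 7306 of the validation split)
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker = torch.tensor(embeddings[7306]["xvector"]).unsqueeze(0)

inputs = processor(text="Thanks for your feedback!", return_tensors="pt")
with torch.no_grad():
    speech = tts_model.generate_speech(inputs["input_ids"], speaker, vocoder=vocoder)

sf.write("demo.wav", speech.numpy(), samplerate=16000)  # SpeechT5 outputs 16 kHz audio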
 
 ##########################################
 # Main Function

     """
     Main function to orchestrate the workflow of sentiment analysis, response generation, and text-to-speech.
     """
+     if text: # Check if the user has entered a comment
+         response = response_gen(text) # Generate a logical and concise response
+         st.write(f"I wanna tell you that: {response}") # Display the generated response in the Streamlit app
+         sound_gen(response) # Convert the text response to speech and make it available for playback

+ # Run the main function when the script is executed
 if __name__ == "__main__":
+     main()
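For anyone exercising this change locally, the app is launched as usual with `streamlit run app.py`. A hypothetical smoke test of the text path alone (no audio) from a Python shell in the same environment is sketched below; the sample comment is made up, and importing `app` also executes the top-level Streamlit calls, which outside a Streamlit session should only produce warnings:

# Hypothetical smoke test for the updated response generation path.
from app import analyze_dominant_emotion, response_gen

comment = "The headphones arrived late and the left ear cup crackles."
print(analyze_dominant_emotion(comment))  # e.g. {'label': 'anger', 'score': ...}
print(response_gen(comment))              # Prompt-conditioned reply from Qwen1.5-0.5B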