# NOTE(review): the lines that were here ("Spaces", "Sleeping", file size,
# git-blame commit hashes, and a run of line numbers) were Hugging Face Spaces
# page chrome captured by the scraper, not part of the source. Removed so the
# file parses as Python.
##########################################
# Step 0: Import required libraries
##########################################
import streamlit as st # For building the web application interface
from transformers import (
pipeline,
SpeechT5Processor,
SpeechT5ForTextToSpeech,
SpeechT5HifiGan,
AutoModelForCausalLM,
AutoTokenizer
) # For emotion analysis, text-to-speech, and text generation
from datasets import load_dataset # For loading datasets (e.g., speaker embeddings)
import torch # For tensor operations
import soundfile as sf # For saving audio as .wav files
##########################################
# Streamlit application title and input
##########################################
st.title("Just Comment") # App title rendered at the top of the page
st.write("I'm listening to you, my friend") # Short tagline shown under the title
text = st.text_area("Enter your comment", "") # Module-level user input; read by main() at the bottom of the file
##########################################
# Step 1: Sentiment Analysis Function
##########################################
@st.cache_resource
def _load_emotion_classifier():
    """Load the fine-tuned emotion classifier once and cache it.

    Without caching, every call to analyze_dominant_emotion() re-downloaded /
    re-loaded the model, which is very slow inside a Streamlit rerun loop.
    """
    return pipeline(
        "text-classification",
        model="Thea231/jhartmann_emotion_finetuning",
        return_all_scores=True
    )


def analyze_dominant_emotion(user_review):
    """Return the dominant emotion detected in *user_review*.

    Args:
        user_review: Raw comment text entered by the user.

    Returns:
        The highest-confidence classification result: a dict with
        'label' (emotion name) and 'score' (confidence) keys.
    """
    emotion_classifier = _load_emotion_classifier()  # cached model, loaded at most once
    # return_all_scores=True wraps the per-label scores in an outer list; [0]
    # selects the scores for our single input string.
    emotion_results = emotion_classifier(user_review)[0]
    # Pick the label with the highest confidence score.
    return max(emotion_results, key=lambda x: x['score'])
##########################################
# Step 2: Response Generation Function
##########################################
def response_gen(user_review):
    """Generate a concise, emotion-appropriate customer-service reply.

    The user's comment is first classified (via analyze_dominant_emotion),
    then an emotion-specific prompt template is filled in and completed by a
    small causal language model.

    Args:
        user_review: Raw comment text entered by the user.

    Returns:
        The generated reply text (str), with the prompt stripped off.
    """
    dominant_emotion = analyze_dominant_emotion(user_review)  # classify the comment
    emotion_label = dominant_emotion['label'].lower()  # e.g. "anger", "joy"

    # Prompt templates keyed by emotion label; each contains a '{review}'
    # placeholder that is filled with the actual comment below.
    emotion_strategies = {
        "anger": {
            "prompt": (
                "Customer complaint: '{review}'\n\n"
                "As a customer service representative, craft a professional response that:\n"
                "- Begins with sincere apology and acknowledgment\n"
                "- Clearly explains solution process with concrete steps\n"
                "- Offers appropriate compensation/redemption\n"
                "- Keeps tone humble and solution-focused (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "disgust": {
            "prompt": (
                "Customer quality concern: '{review}'\n\n"
                "As a customer service representative, craft a response that:\n"
                "- Immediately acknowledges the product issue\n"
                "- Explains quality control measures being taken\n"
                "- Provides clear return/replacement instructions\n"
                "- Offers goodwill gesture (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "fear": {
            "prompt": (
                "Customer safety concern: '{review}'\n\n"
                "As a customer service representative, craft a reassuring response that:\n"
                "- Directly addresses the safety worries\n"
                "- References relevant certifications/standards\n"
                "- Offers dedicated support contact\n"
                "- Provides satisfaction guarantee (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "joy": {
            "prompt": (
                "Customer review: '{review}'\n\n"
                "As a customer service representative, craft a concise response that:\n"
                "- Specifically acknowledges both positive and constructive feedback\n"
                "- Briefly mentions loyalty/referral programs\n"
                "- Ends with shopping invitation (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "neutral": {
            "prompt": (
                "Customer feedback: '{review}'\n\n"
                "As a customer service representative, craft a balanced response that:\n"
                "- Provides additional relevant product information\n"
                "- Highlights key service features\n"
                "- Politely requests more detailed feedback\n"
                "- Maintains professional tone (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "sadness": {
            "prompt": (
                "Customer disappointment: '{review}'\n\n"
                "As a customer service representative, craft an empathetic response that:\n"
                "- Shows genuine understanding of the issue\n"
                "- Proposes personalized recovery solution\n"
                "- Offers extended support options\n"
                "- Maintains positive outlook (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "surprise": {
            "prompt": (
                "Customer enthusiastic feedback: '{review}'\n\n"
                "As a customer service representative, craft a response that:\n"
                "- Matches customer's positive energy appropriately\n"
                "- Highlights unexpected product benefits\n"
                "- Invites to user community/events\n"
                "- Maintains brand voice (3-4 sentences)\n\n"
                "Response:"
            )
        }
    }  # BUGFIX: this closing brace for the outer dict was missing

    # BUGFIX: the lookup referenced an undefined name (`emotion_prompts`),
    # returned the inner dict instead of the template string, and never
    # substituted the '{review}' placeholder. Fixed below.
    strategy = emotion_strategies.get(emotion_label)
    if strategy is not None:
        prompt = strategy["prompt"].format(review=user_review)
    else:
        # Fallback for any label not covered by the templates above.
        prompt = (
            f"Neutral feedback: '{user_review}'\n\n"
            "Write a professional and concise response (50-200 words max).\n\n"
            "Response:"
        )

    # Load the tokenizer and language model for text generation.
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,  # cap the reply length
        min_length=75,  # force a minimum amount of generated text
        no_repeat_ngram_size=2,  # avoid repeating phrases
        do_sample=True,  # BUGFIX: temperature is ignored unless sampling is enabled
        temperature=0.7  # slight randomness for natural-sounding replies
    )
    # Decode the full sequence, then strip the prompt so only the model's
    # answer is displayed and spoken (the original returned prompt + answer).
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    if full_text.startswith(prompt):
        return full_text[len(prompt):].strip()
    return full_text.strip()
##########################################
# Step 3: Text-to-Speech Conversion Function
##########################################
def sound_gen(response):
    """Synthesize *response* to speech and embed a player in the app.

    Pipeline: SpeechT5 processor -> acoustic model (text to mel spectrogram)
    -> HiFi-GAN vocoder (spectrogram to waveform). The result is written to
    'customer_service_response.wav' and played via st.audio.
    """
    # Pre-trained SpeechT5 components for text-to-speech.
    tts_processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    tts_model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    hifigan = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

    # Speaker identity: a fixed x-vector embedding from the CMU ARCTIC set
    # (index 7306), with a batch dimension added.
    voices = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker = torch.tensor(voices[7306]["xvector"]).unsqueeze(0)

    # Text -> spectrogram -> waveform.
    encoded = tts_processor(text=response, return_tensors="pt")
    mel_spectrogram = tts_model.generate_speech(encoded["input_ids"], speaker)
    with torch.no_grad():  # inference only; no gradients needed for the vocoder
        waveform = hifigan(mel_spectrogram)

    # Persist as a 16 kHz .wav file and embed an audio player in the page.
    sf.write("customer_service_response.wav", waveform.numpy(), samplerate=16000)
    st.audio("customer_service_response.wav")
##########################################
# Main Function
##########################################
def main():
    """Run the full workflow: classify, generate a reply, and speak it.

    Reads the module-level `text` captured by the Streamlit text area; does
    nothing until the user has entered a comment.
    """
    if not text:  # guard clause: no comment entered yet
        return
    reply = response_gen(text)  # generate the emotion-aware reply
    st.write(f"I wanna tell you that: {reply}")  # show it in the app
    sound_gen(reply)  # synthesize and play it back
# Run the main function when the script is executed directly.
# BUGFIX: removed a stray trailing '|' (scrape residue) after main() that
# made this line a syntax error.
if __name__ == "__main__":
    main()