##########################################
# Step 0: Import required libraries
##########################################
import streamlit as st  # For building the web application interface
import soundfile as sf  # For saving audio as .wav files
import torch  # For tensor operations used by the text-to-speech models
from transformers import (
    pipeline,
    SpeechT5Processor,
    SpeechT5ForTextToSpeech,
    SpeechT5HifiGan,
    AutoModelForCausalLM,
    AutoTokenizer
)  # For sentiment analysis, text-to-speech, and text generation
from datasets import load_dataset  # For loading datasets (e.g., speaker embeddings)
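# Dependency sketch (package names are the standard PyPI distributions; exact versions
# are an assumption, not taken from the original):
#   streamlit, transformers, datasets, torch, soundfile, sentencepiece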
##########################################
# Streamlit application title and input
##########################################
# Display a colorful, large title in a visually appealing font
st.markdown(
    "<h1 style='text-align: center; color: #FF5720; font-size: 50px;'>Just Comment</h1>",
    unsafe_allow_html=True
)  # Use HTML and CSS to style a custom title
# Display a smaller, warm subtitle below the title
st.markdown(
    "<h3 style='text-align: center; color: #5D6D7E; font-style: italic;'>I'm listening to you, my friend</h3>",
    unsafe_allow_html=True
)  # Use HTML to add a friendly, softly styled subtitle
# Add a well-designed text area for user input
text = st.text_area(
    "Enter your comment",
    placeholder="Type something here...",
    height=280,
    help="Write a comment you would like us to analyze and respond to!"  # Provide a helpful tooltip
)
##########################################
# Step 1: Sentiment Analysis Function
##########################################
def analyze_dominant_emotion(user_review):
    """
    Analyze the dominant emotion in the user's comment using a fine-tuned text classification model.
    """
    emotion_classifier = pipeline(
        "text-classification",
        model="Thea231/jhartmann_emotion_finetuning",
        return_all_scores=True
    )  # Load the fine-tuned emotion classification model from Hugging Face
    emotion_results = emotion_classifier(user_review)[0]  # Score every emotion label for the input text
    dominant_emotion = max(emotion_results, key=lambda x: x['score'])  # Identify the emotion with the highest confidence score
    return dominant_emotion  # Return the dominant emotion (label and score)
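# For reference, the pipeline above returns one score per emotion label; e.g. (values are
# illustrative, not real model output):
#   analyze_dominant_emotion("The package arrived broken and support ignored me.")
#   -> {'label': 'anger', 'score': 0.94}
# selected from an underlying list such as
#   [{'label': 'anger', 'score': 0.94}, {'label': 'joy', 'score': 0.01}, ...]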
##########################################
# Step 2: Response Generation Function
##########################################
def response_gen(user_review):
    """
    Generate a concise and logical response based on the sentiment of the user's comment.
    """
    dominant_emotion = analyze_dominant_emotion(user_review)  # Get the dominant emotion of the user's comment
    emotion_label = dominant_emotion['label'].lower()  # Extract the emotion label in lowercase format
    # Define response templates for each emotion
    emotion_prompts = {
        "anger": (
            f"'{user_review}'\n\n"
            "As a customer service representative, craft a professional response that:\n"
            "- Begins with sincere apology and acknowledgment\n"
            "- Clearly explains solution process with concrete steps\n"
            "- Offers appropriate compensation/redemption\n"
            "- Keeps tone humble and solution-focused (3-4 sentences)\n\n"
            "Response:"
        ),
        "disgust": (
            f"'{user_review}'\n\n"
            "As a customer service representative, craft a response that:\n"
            "- Immediately acknowledges the product issue\n"
            "- Explains quality control measures being taken\n"
            "- Provides clear return/replacement instructions\n"
            "- Offers goodwill gesture (3-4 sentences)\n\n"
            "Response:"
        ),
        "fear": (
            f"'{user_review}'\n\n"
            "As a customer service representative, craft a reassuring response that:\n"
            "- Directly addresses the safety worries\n"
            "- References relevant certifications/standards\n"
            "- Offers dedicated support contact\n"
            "- Provides satisfaction guarantee (3-4 sentences)\n\n"
            "Response:"
        ),
        "joy": (
            f"'{user_review}'\n\n"
            "As a customer service representative, craft a concise and enthusiastic response that:\n"
            "- Thanks the customer for their feedback\n"
            "- Acknowledges both positive and constructive comments\n"
            "- Invites them to explore loyalty programs\n\n"
            "Response:"
        ),
        "neutral": (
            f"'{user_review}'\n\n"
            "As a customer service representative, craft a balanced response that:\n"
            "- Provides additional relevant product information\n"
            "- Highlights key service features\n"
            "- Politely requests more detailed feedback\n"
            "- Maintains professional tone (3-4 sentences)\n\n"
            "Response:"
        ),
        "sadness": (
            f"'{user_review}'\n\n"
            "As a customer service representative, craft an empathetic response that:\n"
            "- Shows genuine understanding of the issue\n"
            "- Proposes personalized recovery solution\n"
            "- Offers extended support options\n"
            "- Maintains positive outlook (3-4 sentences)\n\n"
            "Response:"
        ),
        "surprise": (
            f"'{user_review}'\n\n"
            "As a customer service representative, craft a response that:\n"
            "- Matches customer's positive energy appropriately\n"
            "- Highlights unexpected product benefits\n"
            "- Invites to user community/events\n"
            "- Maintains brand voice (3-4 sentences)\n\n"
            "Response:"
        )
    }
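    # For illustration only (this example review is an assumption, not from the original):
    # for a comment like "The delivery was late and the box was damaged", the "anger"
    # template above expands to the quoted review followed by its bulleted instructions
    # and the trailing "Response:" cue that the language model completes.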
    # Select the appropriate prompt based on the detected emotion, defaulting to a neutral prompt
    prompt = emotion_prompts.get(
        emotion_label,
        f"Neutral feedback: '{user_review}'\n\nWrite a professional and concise response (50-200 words max).\n\nResponse:"
    )
    # Load the tokenizer and language model for response generation
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")  # Load tokenizer for processing text inputs
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")  # Load language model for text generation
    inputs = tokenizer(prompt, return_tensors="pt")  # Tokenize the input prompt
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,      # Cap the number of generated tokens to keep the output concise
        min_new_tokens=75,       # Require a minimum number of new tokens so the response is complete
        no_repeat_ngram_size=2,  # Avoid repetitive phrases
        do_sample=True,          # Enable sampling so the temperature setting takes effect
        temperature=0.7          # Add randomness for more natural responses
    )
    # Decode only the newly generated tokens (excluding the prompt) back into readable text
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True
    )
    print(response)  # Print the response for debugging
    return response  # Return the generated response
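# Optional sketch (not part of the original app): Streamlit reruns this script on every
# interaction, so the models above are reloaded for each submitted comment. Assuming a
# Streamlit version that provides st.cache_resource (1.18 or newer), a cached loader such
# as the following would keep them in memory between reruns:
#
#   @st.cache_resource
#   def load_generator():
#       tok = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
#       lm = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")
#       return tok, lm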
##########################################
# Step 3: Text-to-Speech Conversion Function
##########################################
def sound_gen(response):
    """
    Convert the generated response to speech and save it as a .wav file.
    """
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")  # Pre-trained processor for TTS
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")  # Pre-trained TTS model
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")  # Vocoder for generating waveforms
    # Load a neutral female voice embedding from a pre-trained speaker-embedding dataset
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")  # Load speaker embeddings
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)  # Use a default speaker embedding
    # Process the input text and create a speech spectrogram
    inputs = processor(text=response, return_tensors="pt")
    spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
    # Convert the spectrogram into an audio waveform using the vocoder
    with torch.no_grad():
        speech = vocoder(spectrogram)
    # Save the audio as a .wav file (SpeechT5 produces 16 kHz audio)
    sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
    # Embed an audio player in the web app so the spoken response can be played back
    st.audio("customer_service_response.wav", start_time=0)
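# For reference, a different voice can be used by selecting another row of the xvector
# dataset; the index below is an arbitrary illustrative choice, not one used by this app:
#   speaker_embeddings = torch.tensor(embeddings_dataset[1234]["xvector"]).unsqueeze(0)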
##########################################
# Main Function
##########################################
def main():
    """
    Main function to orchestrate the workflow of sentiment analysis, response generation, and text-to-speech.
    """
    if text:  # Check whether the user has entered a comment
        response = response_gen(text)  # Generate a concise, logical response
        st.markdown(
            f"<p style='color:#2ECC71; font-size:20px;'>{response}</p>",
            unsafe_allow_html=True
        )  # Display the generated response in a styled, friendly font
        sound_gen(response)  # Convert the response to speech and make it available for playback
# Run the main function when the script is executed
if __name__ == "__main__":
    main()
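# To try the app locally (assuming this file is saved as app.py and the dependencies noted
# near the imports are installed):
#   streamlit run app.py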