##########################################
# Step 0: Import required libraries
##########################################
import streamlit as st  # For building the web application interface
from transformers import (  # For text classification, text-to-speech, and text generation
    pipeline,
    SpeechT5Processor,
    SpeechT5ForTextToSpeech,
    SpeechT5HifiGan,
    AutoModelForCausalLM,
    AutoTokenizer
)
from datasets import load_dataset  # For loading the speaker embedding dataset
import torch  # For tensor operations
import soundfile as sf  # For saving audio as .wav files
import sentencepiece  # Required by SpeechT5Processor for tokenization
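
# Optional sketch (not used below): Streamlit reruns the whole script on every
# interaction, so heavyweight model loads can be wrapped in st.cache_resource
# so they are initialized only once per session. The helper name below is
# illustrative, not part of the original flow.
@st.cache_resource
def load_emotion_classifier():
    """Load and cache the emotion classification pipeline (illustrative helper)."""
    return pipeline(
        "text-classification",
        model="Thea231/jhartmann_emotion_finetuning",
        return_all_scores=True
    )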
##########################################
# Streamlit application title and input
##########################################
# Display a deep blue title using HTML and CSS
st.markdown(
    "<h1 style='text-align: center; color: #00008B; font-size: 50px;'>Just Comment</h1>",
    unsafe_allow_html=True
)  # Set the title in deep blue
# Display a gentle, warm subtitle below the title
st.markdown(
    "<h3 style='text-align: center; color: #5D6D7E; font-style: italic;'>I'm listening to you, my friend</h3>",
    unsafe_allow_html=True
)  # Set the subtitle with warm styling
# Provide a text area for user input with a placeholder and tooltip
text = st.text_area(
    "Enter your comment",
    placeholder="Type something here...",
    height=100,
    help="Write a comment you would like us to respond to!"  # Tooltip for guidance
)  # Create the text input field
##########################################
# Step 1: Sentiment Analysis Function
##########################################
def analyze_dominant_emotion(user_review):
    """
    Analyze the dominant emotion in the user's comment using a fine-tuned text classification model.
    """
    # Load the fine-tuned emotion classification model from Hugging Face
    emotion_classifier = pipeline(
        "text-classification",
        model="Thea231/jhartmann_emotion_finetuning",
        return_all_scores=True
    )
    # Get the scores for all emotion labels for the input text
    emotion_results = emotion_classifier(user_review)[0]
    # Identify the emotion with the highest confidence score
    dominant_emotion = max(emotion_results, key=lambda x: x['score'])
    return dominant_emotion  # Return the dominant emotion as a dict with 'label' and 'score'
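
# Illustrative usage (values are hypothetical): calling
#   analyze_dominant_emotion("My order arrived late and the box was crushed.")
# returns a dict such as {'label': 'anger', 'score': 0.93}; the exact label and
# score depend on the fine-tuned classifier.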
##########################################
# Step 2: Response Generation Functions
##########################################
def prompt_gen(user_review):
    """
    Generate the text generation prompt based on the user's comment and detected emotion.
    """
    # Determine the dominant emotion from the user's comment
    dominant_emotion = analyze_dominant_emotion(user_review)
    # Define prompt templates for seven emotions
    emotion_strategies = {
        "anger": {
            "prompt": (
                "Customer complaint: '{review}'\n\n"
                "As a customer service representative, craft a professional response that:\n"
                "- Begins with a sincere apology and acknowledgment.\n"
                "- Clearly explains a solution process with concrete steps.\n"
                "- Offers appropriate compensation or redemption.\n"
                "- Keeps a humble, solution-focused tone (1-3 sentences).\n\n"
                "Response:"
            )
        },
        "disgust": {
            "prompt": (
                "Customer quality concern: '{review}'\n\n"
                "As a customer service representative, craft a response that:\n"
                "- Immediately acknowledges the product issue.\n"
                "- Explains quality control measures being taken.\n"
                "- Provides clear return/replacement instructions.\n"
                "- Offers a goodwill gesture (1-3 sentences).\n\n"
                "Response:"
            )
        },
        "fear": {
            "prompt": (
                "Customer safety concern: '{review}'\n\n"
                "As a customer service representative, craft a reassuring response that:\n"
                "- Directly addresses safety worries.\n"
                "- References relevant certifications or standards.\n"
                "- Offers dedicated support contact.\n"
                "- Provides a satisfaction guarantee (1-3 sentences).\n\n"
                "Response:"
            )
        },
        "joy": {
            "prompt": (
                "Customer review: '{review}'\n\n"
                "As a customer service representative, craft a concise response that:\n"
                "- Thanks the customer for their feedback.\n"
                "- Acknowledges both positive and constructive points.\n"
                "- Invites exploration of loyalty or referral programs (1-3 sentences).\n\n"
                "Response:"
            )
        },
        "neutral": {
            "prompt": (
                "Customer feedback: '{review}'\n\n"
                "As a customer service representative, craft a balanced response that:\n"
                "- Provides relevant product information.\n"
                "- Highlights key service features.\n"
                "- Politely requests detailed feedback.\n"
                "- Maintains a professional tone (1-3 sentences).\n\n"
                "Response:"
            )
        },
        "sadness": {
            "prompt": (
                "Customer disappointment: '{review}'\n\n"
                "As a customer service representative, craft an empathetic response that:\n"
                "- Shows genuine understanding of the issue.\n"
                "- Proposes a personalized recovery solution.\n"
                "- Offers extended support options.\n"
                "- Maintains a positive outlook (1-3 sentences).\n\n"
                "Response:"
            )
        },
        "surprise": {
            "prompt": (
                "Customer enthusiastic feedback: '{review}'\n\n"
                "As a customer service representative, craft a response that:\n"
                "- Matches the customer's positive energy.\n"
                "- Highlights unexpected product benefits.\n"
                "- Invites the customer to join community events.\n"
                "- Maintains the brand's voice (1-3 sentences).\n\n"
                "Response:"
            )
        }
    }  # End of the dictionary of prompt templates
    # Select the template based on the detected emotion; default to neutral if not found
    template = emotion_strategies.get(dominant_emotion["label"].lower(), emotion_strategies["neutral"])["prompt"]
    prompt = template.format(review=user_review)  # Format the prompt with the user's comment
    print(f"Generated prompt: {prompt}")  # Debug: print the generated prompt
    return prompt  # Return the text generation prompt
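
# Illustrative usage: for a review classified as "sadness", prompt_gen returns the
# "sadness" template with '{review}' replaced by the user's text, e.g.
#   "Customer disappointment: 'The gift arrived after the birthday.'\n\n..."
# followed by the bullet instructions and the trailing "Response:" cue.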

def response_gen(user_review):
    """
    Generate a response using text generation based on the user's comment and detected emotion.
    """
    # Get the text generation prompt based on the user's comment and its dominant emotion
    prompt = prompt_gen(user_review)
    # Load the tokenizer and language model for text generation
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")  # Load tokenizer
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")  # Load causal language model
    inputs = tokenizer(prompt, return_tensors="pt")  # Tokenize the prompt
    # Generate a response with constraints to ensure a concise and complete answer
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,       # Allow up to 100 new tokens for the generated answer
        min_new_tokens=30,        # Require at least 30 newly generated tokens (min_length would count the prompt too)
        no_repeat_ngram_size=2,   # Avoid repeated phrases
        do_sample=True,           # Enable sampling so that temperature actually takes effect
        temperature=0.7,          # Moderate randomness for creative responses
        pad_token_id=tokenizer.eos_token_id  # Use EOS as padding to silence the missing-pad-token warning
    )
    input_length = inputs.input_ids.shape[1]  # Length of the tokenized prompt
    # Decode only the newly generated tokens (everything after the prompt)
    response = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True)
    print(f"Generated response: {response}")  # Debug: print the generated response
    return response  # Return the generated response
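
# Alternative sketch (assumption: switching to the instruction-tuned
# "Qwen/Qwen1.5-0.5B-Chat" checkpoint is acceptable, and the installed
# transformers version supports chat templates): a chat-tuned model typically
# follows the prompt's instructions more reliably than the base checkpoint.
#   chat_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
#   chat_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B-Chat")
#   messages = [{"role": "user", "content": prompt}]
#   chat_inputs = chat_tokenizer.apply_chat_template(
#       messages, add_generation_prompt=True, return_tensors="pt"
#   )
#   chat_outputs = chat_model.generate(chat_inputs, max_new_tokens=100)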
##########################################
# Step 3: Text-to-Speech Conversion Function
##########################################
def sound_gen(response):
    """
    Convert the generated response to speech and embed an audio player for playback.
    """
    # Load the SpeechT5 processor, TTS model, and vocoder for audio synthesis
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")  # Load TTS processor
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")  # Load TTS model
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")  # Load vocoder
    # Process the entire generated response text for TTS
    inputs = processor(text=response, return_tensors="pt")  # Tokenize and process the response
    # Load a real 512-dimensional x-vector speaker embedding (SpeechT5 expects dim 512;
    # an all-zero or 1280-dimensional tensor would yield a shape mismatch or unusable audio)
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
    # Generate the speech spectrogram from the input tokens and speaker embedding
    spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
    with torch.no_grad():
        speech = vocoder(spectrogram)  # Convert the spectrogram into an audio waveform
    # Save the audio waveform as a .wav file with a 16 kHz sampling rate
    sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
    # Embed an audio player in the app to play the full response
    st.audio("customer_service_response.wav", start_time=0)
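
# Equivalent one-step sketch: generate_speech also accepts the vocoder directly
# and then returns the waveform, skipping the explicit spectrogram step:
#   speech = model.generate_speech(
#       inputs["input_ids"], speaker_embeddings, vocoder=vocoder
#   )
# SpeechT5 synthesizes audio at 16 kHz, which matches the samplerate used above.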
##########################################
# Main Function
##########################################
def main():
    """
    Main function to orchestrate text generation and text-to-speech conversion.
    It displays only the generated response and plays its audio without extra information.
    """
    if text:  # Only proceed if the user has entered a comment
        response = response_gen(text)  # Generate a response using emotion detection and text generation
        st.markdown(
            f"<p style='color:#3498DB; font-size:20px;'>{response}</p>",
            unsafe_allow_html=True
        )  # Display only the final response with styled formatting
        sound_gen(response)  # Convert the full generated response to speech and embed the audio player
        print(f"Final generated response: {response}")  # Debug: print the final response

# Execute the main function when the script is run
if __name__ == "__main__":
    main()  # Call the main function
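
# To run the app locally (assuming streamlit, transformers, datasets, torch,
# soundfile, and sentencepiece are installed):
#   streamlit run app.py
# (the filename "app.py" is an assumption; substitute this script's actual name).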