##########################################
# Step 0: Import the required libraries
##########################################
import streamlit as st
from transformers import (
    pipeline,
    SpeechT5Processor,
    SpeechT5ForTextToSpeech,
    SpeechT5HifiGan,
    AutoModelForCausalLM,
    AutoTokenizer,
)
from datasets import load_dataset
import torch
import soundfile as sf
from huggingface_hub import login
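# NOTE (assumption): "meta-llama/Llama-3.2-1B", used in Step 2, is a gated checkpoint.
# Access normally has to be granted on the Hub and the app authenticated, for example
# with a token stored as a Space secret before loading the model:
# login(token=os.environ["HF_TOKEN"])  # hypothetical secret name; also requires `import os`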
# Streamlit application title
st.title("Comment reply for you")
st.write("Automatic reply generation")
# Text area for the user to enter a comment
text = st.text_area("Enter your comment", "")
##########################################
# Step 1: Emotion analysis - analyze the emotional tone of the user's comment
##########################################
# Perform all steps when the user clicks the "Comment" button
if st.button("Comment"):
    # Use "j-hartmann/emotion-english-distilroberta-base" for multi-label emotion classification;
    # return_all_scores=True keeps the full score distribution (newer transformers releases prefer top_k=None)
    emotion_classifier = pipeline(
        "text-classification",
        model="j-hartmann/emotion-english-distilroberta-base",
        return_all_scores=True
    )
    # Fall back to a sample review when the text area is left empty
    user_review = text.strip() or "I love the fast delivery, but the product quality could be better."
    # Run emotion analysis on the comment
    emotion_results = emotion_classifier(user_review)[0]  # first element: scores for the single input
    # Print every emotion label and its score (server-side log)
    print("Emotion analysis results (all labels):")
    for emotion in emotion_results:
        print(f"{emotion['label']}: {emotion['score']:.4f}")
    # Extract the emotion label with the highest confidence
    dominant_emotion = max(emotion_results, key=lambda x: x['score'])
    print("\nDominant emotion:", dominant_emotion['label'], f"(confidence: {dominant_emotion['score']:.2f})")
    # Show the analysed text and the detected emotion in the app
    st.write("Text:", user_review)
    st.write("Label:", dominant_emotion['label'])
    st.write("Score:", f"{dominant_emotion['score']:.4f}")
    ##########################################
    # Step 2: Reply generation - generate an automatic reply based on the detected emotion
    ##########################################
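    # NOTE: the keys below are intended to match the label set of
    # j-hartmann/emotion-english-distilroberta-base (anger, disgust, fear, joy,
    # neutral, sadness, surprise), so the lookup further down should not raise a KeyError.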
    emotion_strategies = {
        "anger": {
            "prompt": (
                "Customer complaint: '{review}'\n\n"
                "As a customer service representative, craft a professional response that:\n"
                "- Begins with sincere apology and acknowledgment\n"
                "- Clearly explains solution process with concrete steps\n"
                "- Offers appropriate compensation/redemption\n"
                "- Keeps tone humble and solution-focused (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "disgust": {
            "prompt": (
                "Customer quality concern: '{review}'\n\n"
                "As a customer service representative, craft a response that:\n"
                "- Immediately acknowledges the product issue\n"
                "- Explains quality control measures being taken\n"
                "- Provides clear return/replacement instructions\n"
                "- Offers goodwill gesture (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "fear": {
            "prompt": (
                "Customer safety concern: '{review}'\n\n"
                "As a customer service representative, craft a reassuring response that:\n"
                "- Directly addresses the safety worries\n"
                "- References relevant certifications/standards\n"
                "- Offers dedicated support contact\n"
                "- Provides satisfaction guarantee (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "joy": {
            "prompt": (
                "Customer review: '{review}'\n\n"
                "As a customer service representative, craft a concise response that:\n"
                "- Specifically acknowledges both positive and constructive feedback\n"
                "- Briefly mentions loyalty/referral programs\n"
                "- Ends with shopping invitation (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "neutral": {
            "prompt": (
                "Customer feedback: '{review}'\n\n"
                "As a customer service representative, craft a balanced response that:\n"
                "- Provides additional relevant product information\n"
                "- Highlights key service features\n"
                "- Politely requests more detailed feedback\n"
                "- Maintains professional tone (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "sadness": {
            "prompt": (
                "Customer disappointment: '{review}'\n\n"
                "As a customer service representative, craft an empathetic response that:\n"
                "- Shows genuine understanding of the issue\n"
                "- Proposes personalized recovery solution\n"
                "- Offers extended support options\n"
                "- Maintains positive outlook (3-4 sentences)\n\n"
                "Response:"
            )
        },
        "surprise": {
            "prompt": (
                "Customer enthusiastic feedback: '{review}'\n\n"
                "As a customer service representative, craft a response that:\n"
                "- Matches customer's positive energy appropriately\n"
                "- Highlights unexpected product benefits\n"
                "- Invites to user community/events\n"
                "- Maintains brand voice (3-4 sentences)\n\n"
                "Response:"
            )
        }
    }
    # Build the reply-generation prompt for the dominant emotion
    template = emotion_strategies[dominant_emotion['label'].lower()]["prompt"]
    prompt = template.format(review=user_review)
    print(prompt)
    # Load Llama-3 as the text-generation model
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=100)
    # Decode only the newly generated tokens, skipping the prompt portion
    input_length = inputs.input_ids.shape[1]
    response = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True)
    print(response)
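    # NOTE (assumption): Llama-3.2-1B is a base model rather than an instruction-tuned one,
    # so the continuation can run past the intended reply. One simple heuristic is to cut
    # the text at the first blank line, e.g.:
    # response = response.split("\n\n")[0].strip()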
    ##########################################
    # Step 3: Speech synthesis - synthesize speech from the generated reply
    ##########################################
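    # SpeechT5 TTS runs in two stages: the acoustic model predicts a mel-spectrogram from
    # the input text (conditioned on a speaker x-vector embedding), and the HiFi-GAN
    # vocoder converts that spectrogram into a 16 kHz waveform.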
    # Load the TTS model, processor and vocoder
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    tts_model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
    # Create a default speaker embedding
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)  # neutral female voice
    # Text preprocessing and speech synthesis
    tts_inputs = processor(text=response, return_tensors="pt")
    spectrogram = tts_model.generate_speech(tts_inputs["input_ids"], speaker_embeddings)
    # Use the vocoder to turn the spectrogram into waveform audio
    with torch.no_grad():
        speech = vocoder(spectrogram)
    # Save as a WAV file (16 kHz sample rate)
    sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
    # Play the spoken reply in the app
    st.text("Here is the spoken reply:")
    st.audio("customer_service_response.wav")
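# Usage note (assumption): on a Hugging Face Space this file is typically saved as app.py;
# the same app can be run locally with `streamlit run app.py` after installing
# streamlit, transformers, datasets, torch, soundfile and huggingface_hub.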