joey1101 committed
Commit 5526f12 · verified · 1 Parent(s): c81ed4b

Update app.py

Files changed (1):
  1. app.py +167 -35
app.py CHANGED
@@ -1,56 +1,188 @@
- # Install the required packages in Colab (Colab usually has transformers pre-installed, but running this is recommended to make sure the version is recent)
  !pip install --upgrade transformers
- !pip install soundfile  # some TTS models depend on soundfile

- # Import the required libraries
- from transformers import pipeline
  from IPython.display import Audio, display
  import torch

  ##########################################
  # Step 1: Sentiment analysis - analyze the emotional tone of the user review
  ##########################################
- # This example uses the distilbert-base-uncased-finetuned-sst-2-english model for English sentiment classification
- sentiment_classifier = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")

  # Example user review (replace with a real review)
  user_review = "I love the fast delivery, but the product quality could be better."

- # Run the sentiment analysis
- sentiment_result = sentiment_classifier(user_review)[0]
- print("Sentiment analysis result:", sentiment_result)

  ##########################################
  # Step 2: Reply generation - generate an automatic reply based on the sentiment
  ##########################################
- # Use the microsoft/DialoGPT-medium model to generate the reply
- reply_generator = pipeline("text-generation", model="microsoft/DialoGPT-medium")
-
- # Build the prompt from the sentiment; this revision adds an explicit "Reply:" cue to the prompt
- # to steer the model toward producing new content instead of simply repeating the input
- label = sentiment_result['label'].upper()
- if "POSITIVE" in label or "LABEL_1" in label:
-     prompt = (f"The user review expresses positive sentiment:\nReview: {user_review}\n"
-               f"Please write a warm, appreciative reply that thanks the user and leaves them feeling good:\nReply:")
- elif "NEGATIVE" in label or "LABEL_0" in label:
-     prompt = (f"The user review expresses negative sentiment:\nReview: {user_review}\n"
-               f"Please write a gentle, considerate reply that apologizes and reassures the user:\nReply:")
- else:
-     prompt = f"User review: {user_review}\nPlease write a balanced, fair reply:\nReply:"
-
- print("\nPrompt used for reply generation:")
  print(prompt)

- # Generate the reply text; truncation=True avoids length issues, and the sampling temperature and top_p are raised slightly
- generated = reply_generator(prompt, max_length=150, do_sample=True, top_p=0.95, temperature=0.9, truncation=True)

- generated_text = generated[0]['generated_text']

- # Strip the prompt if the generated text starts with it
- if generated_text.startswith(prompt):
-     final_reply = generated_text[len(prompt):].strip()
- else:
-     final_reply = generated_text.strip()

- print("\nGenerated reply:")
- print(final_reply)
+ !pip install huggingface_hub
  !pip install --upgrade transformers
+ !pip install datasets soundfile

+ ##########################################
+ # Step 0: Import the required libraries
+ ##########################################
+
+ from transformers import pipeline, SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, AutoModelForCausalLM, AutoTokenizer
+ from datasets import load_dataset
  from IPython.display import Audio, display
  import torch
+ import soundfile as sf
+
+ from google.colab import drive
+
+ from huggingface_hub import login
+

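The login and drive helpers are imported above but never called in this revision. The meta-llama/Llama-3.2-1B checkpoint loaded in Step 2 is gated, so an authenticated Hugging Face session is typically required before from_pretrained will succeed. A minimal sketch (the token value is a placeholder, not part of this commit):

from huggingface_hub import login
login()  # opens an interactive token prompt in Colab
# or non-interactively: login(token="hf_your_token_here")  # placeholder token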
  ##########################################
  # Step 1: Sentiment analysis - analyze the emotional tone of the user review
  ##########################################
+
+ # Use the "j-hartmann/emotion-english-distilroberta-base" model for multi-label emotion classification
+ emotion_classifier = pipeline(
+     "text-classification",
+     model="j-hartmann/emotion-english-distilroberta-base",
+     return_all_scores=True
+ )

  # Example user review (replace with a real review)
  user_review = "I love the fast delivery, but the product quality could be better."

+ # Run emotion analysis on the review
+ emotion_results = emotion_classifier(user_review)[0]  # first result in the returned list (single input)
+
+ # Print every emotion label with its score
+ print("Emotion analysis results (all labels):")
+ for emotion in emotion_results:
+     print(f"{emotion['label']}: {emotion['score']:.4f}")
+
+ # Extract the highest-confidence emotion label (optional)
+ dominant_emotion = max(emotion_results, key=lambda x: x['score'])
+ print("\nDominant emotion:", dominant_emotion['label'], f"(confidence: {dominant_emotion['score']:.2f})")
+

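Recent transformers releases deprecate return_all_scores=True in favor of top_k=None. A hedged equivalent of the pipeline construction above (the nesting of the result can differ slightly between versions, so the [0] indexing may need adjusting):

emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=None,  # score every label; replaces the deprecated return_all_scores=True
)

The model scores seven labels (anger, disgust, fear, joy, neutral, sadness, surprise), which matches the keys of the emotion_strategies mapping defined in Step 2.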
  ##########################################
  # Step 2: Reply generation - generate an automatic reply based on the sentiment
  ##########################################
+
+ emotion_strategies = {
+     "anger": {
+         "prompt": (
+             "Customer complaint: '{review}'\n\n"
+             "As a customer service representative, craft a professional response that:\n"
+             "- Begins with sincere apology and acknowledgment\n"
+             "- Clearly explains solution process with concrete steps\n"
+             "- Offers appropriate compensation/redemption\n"
+             "- Keeps tone humble and solution-focused (3-4 sentences)\n\n"
+             "Response:"
+         )
+     },
+     "disgust": {
+         "prompt": (
+             "Customer quality concern: '{review}'\n\n"
+             "As a customer service representative, craft a response that:\n"
+             "- Immediately acknowledges the product issue\n"
+             "- Explains quality control measures being taken\n"
+             "- Provides clear return/replacement instructions\n"
+             "- Offers goodwill gesture (3-4 sentences)\n\n"
+             "Response:"
+         )
+     },
+     "fear": {
+         "prompt": (
+             "Customer safety concern: '{review}'\n\n"
+             "As a customer service representative, craft a reassuring response that:\n"
+             "- Directly addresses the safety worries\n"
+             "- References relevant certifications/standards\n"
+             "- Offers dedicated support contact\n"
+             "- Provides satisfaction guarantee (3-4 sentences)\n\n"
+             "Response:"
+         )
+     },
+     "joy": {
+         "prompt": (
+             "Customer review: '{review}'\n\n"
+             "As a customer service representative, craft a concise response that:\n"
+             "- Specifically acknowledges both positive and constructive feedback\n"
+             "- Briefly mentions loyalty/referral programs\n"
+             "- Ends with shopping invitation (3-4 sentences)\n\n"
+             "Response:"
+         )
+     },
+     "neutral": {
+         "prompt": (
+             "Customer feedback: '{review}'\n\n"
+             "As a customer service representative, craft a balanced response that:\n"
+             "- Provides additional relevant product information\n"
+             "- Highlights key service features\n"
+             "- Politely requests more detailed feedback\n"
+             "- Maintains professional tone (3-4 sentences)\n\n"
+             "Response:"
+         )
+     },
+     "sadness": {
+         "prompt": (
+             "Customer disappointment: '{review}'\n\n"
+             "As a customer service representative, craft an empathetic response that:\n"
+             "- Shows genuine understanding of the issue\n"
+             "- Proposes personalized recovery solution\n"
+             "- Offers extended support options\n"
+             "- Maintains positive outlook (3-4 sentences)\n\n"
+             "Response:"
+         )
+     },
+     "surprise": {
+         "prompt": (
+             "Customer enthusiastic feedback: '{review}'\n\n"
+             "As a customer service representative, craft a response that:\n"
+             "- Matches customer's positive energy appropriately\n"
+             "- Highlights unexpected product benefits\n"
+             "- Invites to user community/events\n"
+             "- Maintains brand voice (3-4 sentences)\n\n"
+             "Response:"
+         )
+     }
+ }
+
+ # Build the reply-generation prompt
+ template = emotion_strategies[dominant_emotion['label'].lower()]["prompt"]
+ prompt = template.format(review=user_review)
  print(prompt)

+ # Load Llama-3 as the text generation model
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")
+ model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-1B")
+
+ inputs = tokenizer(prompt, return_tensors="pt")
+ outputs = model.generate(**inputs, max_new_tokens=100)
+
+ input_length = inputs.input_ids.shape[1]
+ response = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True)
+ print(response)
+
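No decoding options are passed to generate above, so it falls back to the checkpoint's default generation settings, and because Llama-3.2-1B is a base (non-instruct) model it may simply continue the prompt instead of following the instructions. A hedged sketch of passing explicit sampling parameters (the values are illustrative, not from this commit):

outputs = model.generate(
    **inputs,
    max_new_tokens=100,
    do_sample=True,   # sample instead of the default decoding strategy
    temperature=0.7,  # illustrative value
    top_p=0.9,        # illustrative value
)

Switching to the instruction-tuned meta-llama/Llama-3.2-1B-Instruct checkpoint with its chat template is another option if the base model ignores the "Response:" cue.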
+ ##########################################
+ # Step 3: Speech generation - synthesize speech from the reply
+ ##########################################
+
+ # Load the TTS model, processor, and vocoder
+ processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+ tts_model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
+ vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+
+ # Create a default speaker embedding
+ embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+ speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)  # neutral female voice
+
+ # Text preprocessing and speech synthesis
+ inputs = processor(text=response, return_tensors="pt")
+ spectrogram = tts_model.generate_speech(inputs["input_ids"], speaker_embeddings)
+
+ # Generate the waveform with the vocoder
+ with torch.no_grad():
+     speech = vocoder(spectrogram)
+
+ # Save as a WAV file (16 kHz sample rate)
+ sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
+
+ print("Speech generation complete; saved as customer_service_response.wav")
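The Audio and display imports kept from the previous version are no longer used in this revision. A minimal playback sketch for the file written above (assuming the cell has already run in Colab):

from IPython.display import Audio, display
display(Audio("customer_service_response.wav"))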