Update app.py
Browse files
app.py
CHANGED
@@ -2,14 +2,48 @@ import gradio as gr
|
|
2 |
from transformers import pipeline
|
3 |
|
4 |
# Use a pipeline as a high-level helper
|
5 |
-
messages = [
|
6 |
-
{"role": "user", "content": "Who are you?"},
|
7 |
-
]
|
8 |
pipe = pipeline("text-generation", model="X-D-Lab/MindChat-Qwen2-0_5B")
|
9 |
-
pipe(messages)
|
10 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
def respond(message):
|
12 |
-
|
13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
from transformers import pipeline
|
3 |
|
4 |
# Use a pipeline as a high-level helper
|
|
|
|
|
|
|
5 |
# Load the chat model once at import time (downloads weights on first run).
pipe = pipeline("text-generation", model="X-D-Lab/MindChat-Qwen2-0_5B")
|
|
|
6 |
|
7 |
+
# Conversation history: list of (user_message, assistant_answer) pairs.
history = []


# Prompt template
def format_prompt(history, user_input):
    """Render the running conversation plus the new user turn into one
    prompt string for the model to continue.

    Each past turn becomes ``[Round i]\\n问:user\\n答:assistant\\n``; the
    new input is appended as the final round ending in an open ``答:``
    so the model completes the answer.
    """
    rounds = [
        f"[Round {turn_no + 1}]\n问:{user}\n答:{assistant}\n"
        for turn_no, (user, assistant) in enumerate(history)
    ]
    rounds.append(f"[Round {len(history) + 1}]\n问:{user_input}\n答:")
    return "".join(rounds)
|
17 |
+
|
18 |
+
# Chat response handler
def respond(message):
    """Generate a reply to *message* using the accumulated history.

    Builds the full multi-round prompt, samples a continuation from the
    model, strips everything up to the last "答:" marker off the echoed
    prompt, records the new (user, answer) pair in the module-level
    ``history``, and returns the answer text.
    """
    global history

    full_prompt = format_prompt(history, message)
    generation = pipe(
        full_prompt,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
    )
    raw_text = generation[0]["generated_text"]

    # The pipeline echoes the prompt; keep only the text after the last
    # "答:" marker (the model's answer). Fall back to the whole output
    # if the marker is absent.
    marker = "答:"
    if marker in raw_text:
        answer = raw_text.rsplit(marker, 1)[-1].strip()
    else:
        answer = raw_text.strip()

    # Remember this turn for future prompts.
    history.append((message, answer))
    return answer
|
33 |
+
|
34 |
+
# Conversation reset
def reset():
    """Drop every stored turn and return a confirmation message."""
    global history
    # Rebind (rather than mutate) so the next respond() starts fresh.
    history = []
    return "已重置对话历史。"
|
39 |
|
40 |
+
# Build and launch the web UI.
# NOTE(review): the module-level `history` is shared by every visitor of
# this app, so concurrent users will interleave each other's context —
# confirm this is acceptable for a single-user demo.
# NOTE(review): `reset()` is defined above but not wired to any control
# in this Interface; presumably a clear/reset button was intended.
# NOTE(review): `allow_flagging` is deprecated in newer Gradio releases
# (renamed `flagging_mode`) — verify against the installed version.
gr.Interface(
    fn=respond,
    inputs="text",
    outputs="text",
    title="🧠 MindChat 多轮对话演示",
    description="基于 X-D-Lab/MindChat-Qwen2-0_5B 构建的中文聊天机器人,支持多轮上下文记忆。",
    allow_flagging="never",
    live=False
).launch(share=True)
|