|
import gradio as gr |
|
from transformers import pipeline |
|
|
|
# Load MindChat-Qwen2 (0.5B) as a text-generation pipeline.
# NOTE(review): this downloads/loads model weights at import time — first run is slow;
# device placement is left to the pipeline's defaults.
pipe = pipeline("text-generation", model="X-D-Lab/MindChat-Qwen2-0_5B")




# Conversation history as (user_message, assistant_answer) pairs.
# NOTE(review): module-level mutable state — shared by every Gradio visitor,
# so concurrent users would see each other's conversation.
history: list[tuple[str, str]] = []
|
|
|
|
|
def format_prompt(history, user_input):
    """Build the multi-round Chinese chat prompt expected by the model.

    Each past (user, assistant) pair becomes one "[Round N]" section with
    问:/答: markers; ``user_input`` is appended as the final round with an
    empty 答: slot for the model to complete.
    """
    sections = [
        f"[Round {round_no}]\n问:{user}\n答:{assistant}\n"
        for round_no, (user, assistant) in enumerate(history, start=1)
    ]
    sections.append(f"[Round {len(history) + 1}]\n问:{user_input}\n答:")
    return "".join(sections)
|
|
|
|
|
def respond(message): |
|
global history |
|
prompt = format_prompt(history, message) |
|
result = pipe(prompt, max_new_tokens=200, do_sample=True, temperature=0.7)[0]["generated_text"] |
|
|
|
|
|
if "答:" in result: |
|
answer = result.split("答:")[-1].strip() |
|
else: |
|
answer = result.strip() |
|
|
|
|
|
history.append((message, answer)) |
|
return answer |
|
|
|
|
|
def reset():
    """Forget every stored (question, answer) round.

    Rebinds the module-level ``history`` to a fresh empty list and returns a
    short Chinese confirmation string for display in the UI.
    """
    global history
    history = []
    return "已重置对话历史。"
|
|
|
|
|
# Wire the chat function into a minimal single-textbox Gradio UI and start it.
# NOTE(review): `allow_flagging` is deprecated in Gradio 4.x (renamed to
# `flagging_mode`) — confirm the installed Gradio version accepts it.
gr.Interface(

    fn=respond,

    inputs="text",

    outputs="text",

    title="MindChat对话演示",

    description="基于 X-D-Lab/MindChat-Qwen2-0_5B 构建的中文聊天机器人",

    allow_flagging="never",

    # Run only on explicit submit, not on every keystroke.
    live=False

).launch(share=True)  # share=True requests a temporary public *.gradio.live URL