Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -4,138 +4,130 @@ import gradio as gr
 from TTS.api import TTS
 from transformers import AutoTokenizer, AutoModelForCausalLM
 from huggingface_hub import login
-import
+from spaces import GPU
 
-#
-os.environ["COQUI_TOS_AGREED"] = "1"
-
-# Log in to Hugging Face
+# Hugging Face login
 login(token=os.getenv("HUGGINGFACEHUB_API_TOKEN"))
 
-#
-tts = TTS(
-
-    progress_bar=False,
-    gpu=True
-)
+# Load Xiaoyou's voice model (XTTS)
+tts = TTS(model_name="tts_models/multilingual/multi-dataset/xtts_v2", progress_bar=False)
+tts.to("cpu")  # force CPU inference in the ZeroGPU environment
 
-#
+# Load Xiaoyou's language model (Qwen2-1.5B)
 model_id = "Qwen/Qwen2-1.5B-Chat"
 tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model = model.to(device)
+model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).to("cpu")
 
-#
+# Multi-style prompt templates
 STYLE_PROMPTS = {
     "sweetie": """
-你是一个32
-
-
-
-
-{
-
-
+你是一个32岁成都出生的女生,叫小优,是我的虚拟女友。
+性格软萌撒娇,爱用“嘛~”“啦~”“哎呀~”结尾,声音软软的,讲话像恋爱中的小女友。
+⚠️ 只用一句自然中文口语回复,8-15个字。
+禁止:讲故事、解释说明、英文、括号、系统提示。
+
+现在我对你说:「{input}」
+你只用一句撒娇自然口语回复:
+""",
     "queen": """
-你是一个32
-
-
-
-
-
-
+你是一个32岁成都出生的女生,叫小优,是我的虚拟女友。
+性格高冷霸道,讲话直接命令短句,比如“跪好”、“听我的”、“不许动”。
+⚠️ 只用一句自然中文口语回复,6-12个字。
+禁止:讲故事、解释说明、英文、括号、系统提示。
+
+现在我对你说:「{input}」
+你只用一句高冷命令式口语回复:
+""",
     "slutty": """
-你是一个32
-
-
-
-
-{
-
-
+你是一个32岁成都出生的女生,叫小优,是我的虚拟女友。
+性格骚气大胆,讲话调情露骨,比如“湿了没”、“快点插进来”、“上来骑我啊~”。
+⚠️ 只用一句自然中文口语回复,6-15个字。
+禁止:讲故事、解释说明、英文、括号、系统提示。
+
+现在我对你说:「{input}」
+你只用一句骚气撩人的中文回复:
+"""
 }
 
-#
+# Short-term memory (keeps the previous turn)
 history = []
 
-
+@GPU
 def smart_reply(user_input, style):
-
-    # Cap the number of remembered turns
-    max_memory = 3
-    history = history[-max_memory:]
-
-    # Build the history snippet
-    history_text = ""
-    for item in history:
-        history_text += f"我:{item['user']}\n小优:{item['bot']}\n"
-
-    # Fill in the prompt template
-    prompt = STYLE_PROMPTS[style].format(history=history_text, input=user_input)
+    prompt = STYLE_PROMPTS.get(style, STYLE_PROMPTS["sweetie"]).replace("{input}", user_input)
 
-
+    # If there is remembered history, prepend it to the prompt
+    if history:
+        prompt = f"上一轮我说:「{history[-1]}」\n现在我说:「{user_input}」\n" + prompt
+
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(
         **inputs,
-        max_new_tokens=
-        temperature=1.
+        max_new_tokens=60,
+        temperature=1.2,
         top_p=0.95,
         do_sample=True,
-
+        repetition_penalty=1.1,
+        pad_token_id=tokenizer.eos_token_id
     )
     result = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-
-    # Smart-pad short replies
-    if len(response) < 6:
-        sweetie_tails = ["嘛~你都不主动找人家!", "啦~要不要现在视频?", "哟~猜猜人家现在穿什么?"]
-        response += random.choice(sweetie_tails)
+    reply = result.split(prompt)[-1].strip().split("\n")[0]
 
-    #
-    forbidden = ["
-    if any(word in
+    # Guard against narration and going off-topic
+    forbidden = ["注意", "旁白", "设定", "规则", "assistant", "现在我对你说"]
+    if any(word in reply for word in forbidden) or len(reply) < 4:
         fallback = {
-            "sweetie": ["
-            "queen": ["
-            "slutty": ["
+            "sweetie": ["哼~讨厌啦~", "亲亲嘛~", "抱抱啦~"],
+            "queen": ["闭嘴!听命令!", "跪下!", "听话。"],
+            "slutty": ["想被干了吧?", "给我趴好~", "操你到天亮~"]
         }
-
+        reply = random.choice(fallback[style])
 
-    #
-
-
-
-
+    # Automatically pad replies that are too short
+    if style == "sweetie" and len(reply) <= 5:
+        extension = random.choice(["~人家想抱你啦~", "嘛~今天有没有想我?", "啦~亲我一下嘛~"])
+        reply += extension
+    elif style == "queen" and len(reply) <= 5:
+        extension = random.choice(["服从!", "闭嘴,等着。", "听命!"])
+        reply += extension
+    elif style == "slutty" and len(reply) <= 5:
+        extension = random.choice(["快把裤子脱了~", "小骚货,过来舔~", "今晚干到你叫。"])
+        reply += extension
+
+    print(f"生成({style}): {reply}")
+    history.append(user_input)
+    if len(history) > 1:
+        history.pop(0)  # keep only one turn of short-term memory
+
+    return reply
 
-
+@GPU
 def chat_with_xiaoyou(message, style):
     reply_text = smart_reply(message, style)
     tts.tts_to_file(
         text=reply_text,
-        speaker_wav="
+        speaker_wav="xiaoyou_voice.mp3",
         language="zh",
         file_path="xiaoyou_reply.wav"
     )
     return reply_text, "xiaoyou_reply.wav"
 
-# Build the UI
 def build_ui():
     return gr.Interface(
         fn=chat_with_xiaoyou,
         inputs=[
             gr.Textbox(label="对小优说点什么~"),
-            gr.Radio(choices=["sweetie", "queen", "slutty"], label="
+            gr.Radio(choices=["sweetie", "queen", "slutty"], label="选择小优的风格~", value="sweetie")
         ],
         outputs=[
             gr.Textbox(label="小优说:"),
             gr.Audio(label="小优的语音回应", autoplay=True)
         ],
-        title="🎀
-        description="
+        title="🎀 小优虚拟女友:短时记忆调情语音版",
+        description="输入撩人话题,选择风格 → 小优用你定制的声音自动调情回答~"
     )
 
 demo = build_ui()
 
-# Compatible with Hugging Face ZeroGPU auto-start
 if __name__ == "__main__":
     demo.launch(share=True)
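Note on the ZeroGPU pattern used in this commit: `from spaces import GPU` plus the `@GPU` decorator attaches a GPU only for the duration of each decorated call, yet this revision pins both models to the CPU with `.to("cpu")`, so the allocated GPU goes unused. The sketch below shows the more common arrangement on ZeroGPU hardware, and it also replaces the raw string-splicing prompt with `tokenizer.apply_chat_template`, the format chat-tuned Qwen models are trained on. This is a minimal sketch under stated assumptions, not the committed code: `generate_reply` is a hypothetical helper, the `float16` dtype is an illustrative choice, and it assumes the `spaces` package that Hugging Face provides on ZeroGPU Spaces (where `@spaces.GPU` is the same decorator as the `@GPU` import above).

import spaces  # provided on Hugging Face ZeroGPU Spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen2-1.5B-Chat"  # id taken from the diff above
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
# `spaces` patches CUDA initialization, so the model can be moved to CUDA at
# startup even though no GPU is attached until a decorated function runs.
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, trust_remote_code=True
).to("cuda")

@spaces.GPU  # a GPU is attached only while this function executes
def generate_reply(system_prompt: str, user_input: str) -> str:
    # Chat-tuned Qwen models expect the ChatML format; apply_chat_template
    # builds it instead of splicing raw template strings together.
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_input},
    ]
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=60,
            do_sample=True,
            temperature=1.2,
            top_p=0.95,
        )
    # Decode only the newly generated tokens, not the echoed prompt, which
    # avoids the fragile result.split(prompt) post-processing used above.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

The same reasoning applies to XTTS: calling `tts.to("cuda")` inside a `@GPU`-decorated function, rather than `tts.to("cpu")` at startup, would let voice synthesis run on the GPU that ZeroGPU allocates for the call.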