Update app.py
app.py CHANGED
@@ -121,29 +121,11 @@ def generate_response_non_streaming(instruction, model_name, temperature=0.7, ma
     load_message = load_model(model_name)
     if "failed" in load_message.lower():
         return load_message
-
+
     try:
-        # Check whether the model has a chat template
-        if hasattr(current_tokenizer, 'chat_template') and current_tokenizer.chat_template:
-            # Use the model's chat template
-            messages = [
-                {"role": "system", "content": SYSTEM_PROMPT},
-                {"role": "user", "content": instruction}
-            ]
-
-            # Format the input with the model's chat template
-            chat_input = current_tokenizer.apply_chat_template(
-                messages,
-                tokenize=True,
-                return_tensors="pt"
-            ).to(current_model.device)
-        else:
-            # Use the specified prompt format
-            prompt = f"User:{instruction}\nAssistant:"
-            chat_input = current_tokenizer.encode(prompt, return_tensors="pt").to(current_model.device)
-
-        # Get the token id of <|endoftext|>, used to stop generation
-        eos_token_id = current_tokenizer.eos_token_id
+        # Use the simple prompt format directly instead of the model's chat template
+        prompt = f"User:{instruction}\nAssistant:"
+        chat_input = current_tokenizer.encode(prompt, return_tensors="pt").to(current_model.device)
 
         # Generate the response
         output = current_model.generate(
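The hunk above replaces the template-aware branch with a single fixed User:/Assistant: prompt. As a rough standalone sketch of what the new path does (the model id, instruction, and sampling values below are illustrative placeholders; the Space itself resolves its model through load_model() and globals such as current_tokenizer and current_model):

# Minimal sketch of the simplified generation path; model id and settings
# are placeholders, not values taken from this Space.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder; the Space loads its own model via load_model()
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

instruction = "What is the capital of France?"  # placeholder instruction

# Same fixed prompt format as the patched code, bypassing any chat template
prompt = f"User:{instruction}\nAssistant:"
chat_input = tokenizer.encode(prompt, return_tensors="pt").to(model.device)

output = model.generate(
    chat_input,
    max_new_tokens=64,
    temperature=0.7,
    do_sample=True,                       # sample whenever temperature > 0
    eos_token_id=tokenizer.eos_token_id,  # stop at <|endoftext|>
)

# Decode only the newly generated tokens, mirroring the Space's slicing
print(tokenizer.decode(output[0][chat_input.shape[1]:], skip_special_tokens=True))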
@@ -160,6 +142,44 @@ def generate_response_non_streaming(instruction, model_name, temperature=0.7, ma
     except Exception as e:
         return f"Error generating response: {str(e)}"
 
+    # try:
+    #     # Check whether the model has a chat template
+    #     if hasattr(current_tokenizer, 'chat_template') and current_tokenizer.chat_template:
+    #         # Use the model's chat template
+    #         messages = [
+    #             {"role": "system", "content": SYSTEM_PROMPT},
+    #             {"role": "user", "content": instruction}
+    #         ]
+
+    #         # Format the input with the model's chat template
+    #         chat_input = current_tokenizer.apply_chat_template(
+    #             messages,
+    #             tokenize=True,
+    #             return_tensors="pt"
+    #         ).to(current_model.device)
+    #     else:
+    #         # Use the specified prompt format
+    #         prompt = f"User:{instruction}\nAssistant:"
+    #         chat_input = current_tokenizer.encode(prompt, return_tensors="pt").to(current_model.device)
+
+    #     # Get the token id of <|endoftext|>, used to stop generation
+    #     eos_token_id = current_tokenizer.eos_token_id
+
+    #     # Generate the response
+    #     output = current_model.generate(
+    #         input_ids=chat_input,
+    #         max_new_tokens=max_tokens,
+    #         temperature=temperature,
+    #         do_sample=(temperature > 0),
+    #         eos_token_id=current_tokenizer.eos_token_id  # Use <|endoftext|> as the stop token
+    #     )
+
+    #     # Decode and return the generated text
+    #     generated_text = current_tokenizer.decode(output[0][len(chat_input[0]):], skip_special_tokens=True)
+    #     return generated_text
+    # except Exception as e:
+    #     return f"Error generating response: {str(e)}"
+
 def update_chat_with_response(chatbot, instruction, model_name, temperature, max_tokens):
     """Updates the chatbot with non-streaming response"""
     global current_model, current_tokenizer, current_model_path
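For comparison, the chat-template branch that this commit comments out corresponds roughly to the sketch below. The model id and SYSTEM_PROMPT value are assumed stand-ins, and add_generation_prompt=True is an assumption added for a proper assistant turn; the original code did not pass it.

# Rough sketch of the disabled chat-template path; model id and
# SYSTEM_PROMPT are assumed stand-ins, not values from this Space.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen2.5-0.5B-Instruct"  # any model that ships a chat template
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

SYSTEM_PROMPT = "You are a helpful assistant."  # stand-in for the Space's constant
instruction = "What is the capital of France?"

if getattr(tokenizer, "chat_template", None):
    # Format the conversation with the tokenizer's built-in chat template
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": instruction},
    ]
    chat_input = tokenizer.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,  # assumption: request the assistant turn
        return_tensors="pt",
    ).to(model.device)
else:
    # Fall back to the plain prompt format used by the patched code
    prompt = f"User:{instruction}\nAssistant:"
    chat_input = tokenizer.encode(prompt, return_tensors="pt").to(model.device)

output = model.generate(chat_input, max_new_tokens=64, do_sample=False,
                        eos_token_id=tokenizer.eos_token_id)
print(tokenizer.decode(output[0][chat_input.shape[1]:], skip_special_tokens=True))

One trade-off worth noting: a chat template encodes the special tokens and role markers a chat-tuned model was trained on, so bypassing it in favor of a bare User:/Assistant: prompt can weaken instruction-following on such models, while remaining the simpler choice for base models without a template.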