nuojohnchen committed
Commit b95f8c7 · verified · 1 Parent(s): c683b58

Update app.py

Files changed (1)
  1. app.py +0 -20
app.py CHANGED
@@ -1,19 +1,3 @@
- Additionally, although I would like auto_examples to be populated as shown in # Examples for auto-generation
- auto_examples = [
-     ["Write a short poem about artificial intelligence",
-      "Qwen/Qwen2.5-7B-Instruct",
-      "Qwen/Qwen2.5-7B-Instruct",
-      "01-ai/Yi-6B-Chat",
-      "01-ai/Yi-6B-Chat"],
-     ["我听说有些人有高血压却没有任何症状。这是真的吗?",
-      "FreedomIntelligence/Apollo-7B",
-      "FreedomIntelligence/Apollo-7B",
-      "microsoft/phi-2",
-      "openchat/openchat-3.5-0106"]
- ] above, I would like the cases presented in the frontend to show only three columns: instruction/question, model 1, model 2. Can this be done?
-
-
-
  import gradio as gr
  import os
  import spaces
@@ -127,17 +111,13 @@ def generate_response(instruction, model_path, progress=gr.Progress()):
          do_sample=True
      )

-     # Clean up the response: strip the prompt portion
      full_response = response_tokenizer.decode(output[0], skip_special_tokens=True)

-     # Remove the prompt section
      clean_response = full_response.replace(f"<|user|>\n{instruction}\n<|assistant|>", "").strip()

-     # If the model generated a multi-turn dialogue, keep only the first answer
      if "<|user|>" in clean_response:
          clean_response = clean_response.split("<|user|>")[0].strip()

-     # Handle other multi-turn dialogue markers the model may use
      for token in ["<user>", "User:", "Human:"]:
          if token in clean_response:
              clean_response = clean_response.split(token)[0].strip()
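The note removed in the first hunk asks whether the frontend can show only three example columns (instruction/question, model 1, model 2) even though each auto_examples row carries five fields. Below is a minimal sketch of one way to do that with gr.Examples; it is not part of this commit, and the component names (instruction, model_1, model_2, base_1, base_2) and the fill_hidden_fields helper are assumptions made for illustration. Only the first three fields of each row are passed to the examples table; the remaining two are filled in when a row is clicked via run_on_click.

import gradio as gr

# Full rows kept in the backend (five fields each); only three become example columns.
auto_examples = [
    ["Write a short poem about artificial intelligence",
     "Qwen/Qwen2.5-7B-Instruct", "Qwen/Qwen2.5-7B-Instruct",
     "01-ai/Yi-6B-Chat", "01-ai/Yi-6B-Chat"],
    ["我听说有些人有高血压却没有任何症状。这是真的吗?",
     "FreedomIntelligence/Apollo-7B", "FreedomIntelligence/Apollo-7B",
     "microsoft/phi-2", "openchat/openchat-3.5-0106"],
]

def fill_hidden_fields(instruction_text, model_1_path, model_2_path):
    # Hypothetical helper: look up the clicked row and return its two base-model fields.
    for row in auto_examples:
        if row[0] == instruction_text:
            return row[3], row[4]
    return "", ""

with gr.Blocks() as demo:
    instruction = gr.Textbox(label="Instruction / Question")
    model_1 = gr.Textbox(label="Model 1")
    model_2 = gr.Textbox(label="Model 2")
    base_1 = gr.Textbox(visible=False)  # hidden from the frontend
    base_2 = gr.Textbox(visible=False)

    # The examples table shows only the three visible columns;
    # clicking a row runs fill_hidden_fields to populate the hidden fields.
    gr.Examples(
        examples=[row[:3] for row in auto_examples],
        inputs=[instruction, model_1, model_2],
        fn=fill_hidden_fields,
        outputs=[base_1, base_2],
        run_on_click=True,
        cache_examples=False,
    )

demo.launch()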
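For reference, the cleaning steps kept in the second hunk of generate_response (decode, strip the echoed chat template, then cut at the first multi-turn marker) can be read as one small helper. A minimal sketch under that reading; the clean_model_response name and the sample string are illustrative only, not part of app.py.

def clean_model_response(full_response: str, instruction: str) -> str:
    # Strip the chat-template prompt that the model echoed back.
    cleaned = full_response.replace(f"<|user|>\n{instruction}\n<|assistant|>", "").strip()
    # Keep only the first answer if the model continued the dialogue.
    for marker in ["<|user|>", "<user>", "User:", "Human:"]:
        if marker in cleaned:
            cleaned = cleaned.split(marker)[0].strip()
    return cleaned

# Example: a decoded output that echoes the prompt and starts a second turn.
demo_output = "<|user|>\nWhat is 2+2?\n<|assistant|>It is 4.\n<|user|>\nAnd 3+3?"
print(clean_model_response(demo_output, "What is 2+2?"))  # -> "It is 4."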