Spaces:
Running
on
CPU Upgrade
add qwen2.5 32b as the default
Browse files
backend/routes/questions.py
CHANGED
@@ -37,7 +37,7 @@ async def get_benchmark_questions(session_id: str):
|
|
37 |
if single_dataset and len(single_dataset['train']) > 0:
|
38 |
# Prendre 2 questions à partir de l'index 1 (en évitant la première question)
|
39 |
start_idx = 1
|
40 |
-
max_questions = min(
|
41 |
for i in range(max_questions):
|
42 |
idx = start_idx + i
|
43 |
questions.append({
|
|
|
37 |
if single_dataset and len(single_dataset['train']) > 0:
|
38 |
# Prendre 2 questions à partir de l'index 1 (en évitant la première question)
|
39 |
start_idx = 1
|
40 |
+
max_questions = min(5, max(0, len(single_dataset['train']) - start_idx))
|
41 |
for i in range(max_questions):
|
42 |
idx = start_idx + i
|
43 |
questions.append({
|
backend/tasks/create_bench_config_file.py
CHANGED
@@ -122,7 +122,8 @@ class CreateBenchConfigTask:
|
|
122 |
# Define required models
|
123 |
required_models = [
|
124 |
# "Qwen/Qwen2.5-72B-Instruct"
|
125 |
-
"meta-llama/Llama-3.1-8B-Instruct"
|
|
|
126 |
]
|
127 |
|
128 |
# Track found models
|
@@ -165,11 +166,11 @@ class CreateBenchConfigTask:
|
|
165 |
"model_list": model_list,
|
166 |
|
167 |
"model_roles": {
|
168 |
-
"ingestion": ["meta-llama/Llama-3.1-8B-Instruct"],
|
169 |
-
"summarization": ["meta-llama/Llama-3.1-8B-Instruct"],
|
170 |
"chunking": ["intfloat/multilingual-e5-large-instruct"],
|
171 |
-
"single_shot_question_generation": ["meta-llama/Llama-3.1-8B-Instruct"],
|
172 |
-
"multi_hop_question_generation": ["meta-llama/Llama-3.1-8B-Instruct"],
|
173 |
},
|
174 |
"pipeline": {
|
175 |
"ingestion": {
|
@@ -200,7 +201,7 @@ class CreateBenchConfigTask:
|
|
200 |
"additional_instructions": "Generate rich and creative questions to test a curious adult",
|
201 |
"chunk_sampling": {
|
202 |
"mode": "count",
|
203 |
-
"value":
|
204 |
"random_seed": 123,
|
205 |
},
|
206 |
},
|
|
|
122 |
# Define required models
|
123 |
required_models = [
|
124 |
# "Qwen/Qwen2.5-72B-Instruct"
|
125 |
+
# "meta-llama/Llama-3.1-8B-Instruct"
|
126 |
+
"Qwen/Qwen2.5-32B-Instruct"
|
127 |
]
|
128 |
|
129 |
# Track found models
|
|
|
166 |
"model_list": model_list,
|
167 |
|
168 |
"model_roles": {
|
169 |
+
"ingestion": ["Qwen/Qwen2.5-32B-Instruct"],
|
170 |
+
"summarization": ["Qwen/Qwen2.5-32B-Instruct"],
|
171 |
"chunking": ["intfloat/multilingual-e5-large-instruct"],
|
172 |
+
"single_shot_question_generation": ["Qwen/Qwen2.5-32B-Instruct"],
|
173 |
+
"multi_hop_question_generation": ["Qwen/Qwen2.5-32B-Instruct"],
|
174 |
},
|
175 |
"pipeline": {
|
176 |
"ingestion": {
|
|
|
201 |
"additional_instructions": "Generate rich and creative questions to test a curious adult",
|
202 |
"chunk_sampling": {
|
203 |
"mode": "count",
|
204 |
+
"value": 10,
|
205 |
"random_seed": 123,
|
206 |
},
|
207 |
},
|
backend/tasks/evaluation_task.py
CHANGED
@@ -319,6 +319,7 @@ TASKS_TABLE = [yourbench]
|
|
319 |
models = [
|
320 |
"Qwen/QwQ-32B",
|
321 |
"Qwen/Qwen2.5-72B-Instruct",
|
|
|
322 |
"meta-llama/Llama-3.1-8B-Instruct",
|
323 |
"meta-llama/Llama-3.3-70B-Instruct",
|
324 |
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
|
|
|
319 |
models = [
|
320 |
"Qwen/QwQ-32B",
|
321 |
"Qwen/Qwen2.5-72B-Instruct",
|
322 |
+
"Qwen/Qwen2.5-32B-Instruct",
|
323 |
"meta-llama/Llama-3.1-8B-Instruct",
|
324 |
"meta-llama/Llama-3.3-70B-Instruct",
|
325 |
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
|
backend/tasks/get_available_model_provider.py
CHANGED
@@ -136,6 +136,7 @@ if __name__ == "__main__":
|
|
136 |
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
|
137 |
"mistralai/Mistral-Small-24B-Instruct-2501",
|
138 |
"meta-llama/Llama-3.1-8B-Instruct",
|
|
|
139 |
]
|
140 |
|
141 |
providers = []
|
|
|
136 |
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
|
137 |
"mistralai/Mistral-Small-24B-Instruct-2501",
|
138 |
"meta-llama/Llama-3.1-8B-Instruct",
|
139 |
+
"Qwen/Qwen2.5-32B-Instruct"
|
140 |
]
|
141 |
|
142 |
providers = []
|