sumuks committed on
Commit
89c3b6d
·
verified ·
1 Parent(s): 5288fd2

Temporarily switch to Llama 3.1 8B to allow more scale

Browse files
backend/tasks/create_bench_config_file.py CHANGED
@@ -121,7 +121,8 @@ class CreateBenchConfigTask:
121
 
122
  # Define required models
123
  required_models = [
124
- "Qwen/Qwen2.5-72B-Instruct"
 
125
  ]
126
 
127
  # Track found models
@@ -164,11 +165,11 @@ class CreateBenchConfigTask:
164
  "model_list": model_list,
165
 
166
  "model_roles": {
167
- "ingestion": ["Qwen/Qwen2.5-72B-Instruct"],
168
- "summarization": ["Qwen/Qwen2.5-72B-Instruct"],
169
  "chunking": ["intfloat/multilingual-e5-large-instruct"],
170
- "single_shot_question_generation": ["Qwen/Qwen2.5-72B-Instruct"],
171
- "multi_hop_question_generation": ["Qwen/Qwen2.5-72B-Instruct"],
172
  },
173
  "pipeline": {
174
  "ingestion": {
 
121
 
122
  # Define required models
123
  required_models = [
124
+ # "Qwen/Qwen2.5-72B-Instruct"
125
+ "meta-llama/Llama-3.1-8B-Instruct"
126
  ]
127
 
128
  # Track found models
 
165
  "model_list": model_list,
166
 
167
  "model_roles": {
168
+ "ingestion": ["meta-llama/Llama-3.1-8B-Instruct"],
169
+ "summarization": ["meta-llama/Llama-3.1-8B-Instruct"],
170
  "chunking": ["intfloat/multilingual-e5-large-instruct"],
171
+ "single_shot_question_generation": ["meta-llama/Llama-3.1-8B-Instruct"],
172
+ "multi_hop_question_generation": ["meta-llama/Llama-3.1-8B-Instruct"],
173
  },
174
  "pipeline": {
175
  "ingestion": {