mostafa-sh committed
Commit 3b70bb3 · 1 Parent(s): 4a6114a

fix model selection

Files changed (2)
  1. app.py +1 -1
  2. utils/rag_utils.py +0 -1
app.py CHANGED
@@ -242,7 +242,7 @@ if submit_button_placeholder.button("AI Answer", type="primary"):
             num_beams=tommi_num_beams if not tommi_do_sample else 1,
             max_new_tokens=tommi_max_new_tokens
         )
-    else:
+    elif model in ["gpt-4o-mini", "gpt-3.5-turbo"]:
         expert_answer = openai_domain_specific_answer_generation(
             get_expert_system_prompt(),
             st.session_state.question,
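For context, a minimal runnable sketch of the selection logic this hunk changes: with the old bare else:, any model name outside the local TOMMI branch fell through to the OpenAI call, while the new elif restricts that path to the two supported OpenAI models. The helper bodies below are stubs; only openai_domain_specific_answer_generation, get_expert_system_prompt, and st.session_state.question appear in the diff, and tommi_generate_answer is a hypothetical stand-in.

# Sketch only -- stubs stand in for the real helpers in app.py and utils.
def get_expert_system_prompt() -> str:
    return "You are a domain expert."

def openai_domain_specific_answer_generation(system_prompt: str, question: str) -> str:
    return f"[OpenAI answer to: {question}]"   # stand-in for the real API call

def tommi_generate_answer(question: str) -> str:
    return f"[TOMMI answer to: {question}]"    # hypothetical local-model branch

def generate_expert_answer(model: str, question: str):
    if model.startswith("tommi"):
        return tommi_generate_answer(question)
    # The fix: only the supported OpenAI models take the OpenAI path,
    # instead of a bare `else:` that caught every non-TOMMI model name.
    elif model in ["gpt-4o-mini", "gpt-3.5-turbo"]:
        return openai_domain_specific_answer_generation(
            get_expert_system_prompt(), question
        )
    return None  # unknown model names no longer fall through

print(generate_expert_answer("gpt-4o-mini", "What is a stiffness matrix?"))
print(generate_expert_answer("unsupported-model", "What is a stiffness matrix?"))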
utils/rag_utils.py CHANGED
@@ -29,7 +29,6 @@ def load_summary(file_path):
 
 def fixed_knn_retrieval(question_embedding, context_embeddings, top_k=5, min_k=1):
 
-
     question_embedding = np.array(question_embedding)
 
     # Normalize
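The hunk above only removes a stray blank line inside fixed_knn_retrieval. For readers unfamiliar with the function, here is a minimal sketch of what a retrieval helper with this signature typically does. Only the np.array conversion and the # Normalize step are visible in the diff; the cosine-similarity ranking and the top_k/min_k handling below are assumptions, not the repository's implementation.

import numpy as np

def fixed_knn_retrieval_sketch(question_embedding, context_embeddings, top_k=5, min_k=1):
    # Only the conversion and normalization steps appear in the diff;
    # everything after the similarity line is an assumed, illustrative kNN ranking.
    question_embedding = np.array(question_embedding)
    context_embeddings = np.array(context_embeddings)

    # Normalize so that a dot product equals cosine similarity.
    question_embedding = question_embedding / np.linalg.norm(question_embedding)
    context_embeddings = context_embeddings / np.linalg.norm(
        context_embeddings, axis=1, keepdims=True
    )

    similarities = context_embeddings @ question_embedding
    k = max(min_k, min(top_k, len(similarities)))
    return np.argsort(similarities)[::-1][:k].tolist()

# Example: retrieve the two most similar contexts.
# print(fixed_knn_retrieval_sketch([1.0, 0.0], [[1.0, 0.1], [0.0, 1.0], [0.9, 0.2]], top_k=2))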