mostafa-sh committed
Commit d340abc · 1 Parent(s): 3b70bb3

fix model selection

Files changed (1)
  1. app.py +10 -4
app.py CHANGED
@@ -104,9 +104,15 @@ with st.sidebar:
     use_expert_answer = st.toggle("Use expert answer", value=True)
     show_expert_responce = st.toggle("Show initial expert answer", value=False)
 
-    model = st.selectbox("Choose the LLM model", ["gpt-4o-mini", "gpt-3.5-turbo", "llama-tommi-0.35"], key='a1model')
+    st.session_state.expert_model = st.selectbox(
+        "Choose the LLM model",
+        ["gpt-4o-mini",
+         "gpt-3.5-turbo",
+         "llama-tommi-0.35"],
+        key='a1model'
+    )
 
-    if model == "llama-tommi-0.35":
+    if st.session_state.expert_model == "llama-tommi-0.35":
         tommi_do_sample = st.toggle("Enable Sampling", value=False, key='tommi_sample')
 
         if tommi_do_sample:
@@ -220,7 +226,7 @@ if submit_button_placeholder.button("AI Answer", type="primary"):
             context += context_item['text'] + '\n\n'
 
     if use_expert_answer:
-        if model == "llama-tommi-0.35":
+        if st.session_state.expert_model == "llama-tommi-0.35":
             if 'tommi_model' not in st.session_state:
                 tommi_model, tommi_tokenizer = load_fine_tuned_model(adapter_path, base_model_path)
                 st.session_state.tommi_model = tommi_model
@@ -242,7 +248,7 @@ if submit_button_placeholder.button("AI Answer", type="primary"):
                 num_beams=tommi_num_beams if not tommi_do_sample else 1,
                 max_new_tokens=tommi_max_new_tokens
             )
-        elif model in ["gpt-4o-mini", "gpt-3.5-turbo"]:
+        elif st.session_state.expert_model in ["gpt-4o-mini", "gpt-3.5-turbo"]:
             expert_answer = openai_domain_specific_answer_generation(
                 get_expert_system_prompt(),
                 st.session_state.question,
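The pattern behind this fix: Streamlit re-executes app.py top to bottom on every interaction, and the "AI Answer" handler lives far from the sidebar that renders the selectbox. Writing the selection into st.session_state.expert_model gives both places one shared, rerun-stable name to read, instead of a sidebar-local variable. A minimal sketch of the same pattern, assuming a stripped-down app (the st.write calls and button wiring are illustrative, not from app.py):

```python
import streamlit as st

with st.sidebar:
    # Expose the widget's current value under an explicit session_state
    # name so code outside the sidebar can read the same selection.
    st.session_state.expert_model = st.selectbox(
        "Choose the LLM model",
        ["gpt-4o-mini", "gpt-3.5-turbo", "llama-tommi-0.35"],
        key='a1model',
    )

if st.button("AI Answer", type="primary"):
    # Reads the entry the sidebar wrote earlier in this same rerun.
    if st.session_state.expert_model == "llama-tommi-0.35":
        st.write("Routing to the fine-tuned local model")  # hypothetical action
    else:
        st.write(f"Routing to OpenAI model {st.session_state.expert_model}")
```

Note that key='a1model' already keeps the widget's own value across reruns (it is reachable as st.session_state['a1model']); the explicit assignment simply exposes that value under a descriptive name for the rest of the script.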