sandz7 committed on
Commit
18a692b
·
1 Parent(s): c0248ea

added empty returns on input_text conditions for ending the function run

Browse files
Files changed (1) hide show
  1. app.py +5 -0
app.py CHANGED
@@ -134,8 +134,10 @@ def bot_comms(input_text: str,
134
  if input_text == "mode":
135
  if llm_mode == "":
136
  yield "The mode is currently at Loki Default mode"
 
137
  else:
138
  yield f"The current mode: {llm_mode}"
 
139
 
140
  if input_text == "check cuda":
141
  cuda_info = check_cuda()
@@ -144,14 +146,17 @@ def bot_comms(input_text: str,
144
  if input_text == "switch to llama":
145
  llm_mode = input_text
146
  yield "Got it! Llama is now activate for your questions only πŸ¦™"
 
147
 
148
  if input_text == "switch to gpt-4o":
149
  llm_mode = input_text
150
  yield "Understood! GPT-4o is now hearing your responses only πŸ‘Ύ"
 
151
 
152
  if input_text == "switch to gpt-3.5-turbo":
153
  llm_mode = input_text
154
  yield "Done. GPT-3.5-turbo is ready for your questions! πŸƒ"
 
155
 
156
  if llm_mode == "switch to llama":
157
  streamer = llama_generation(input_text=input_text, history=history, temperature=temperature, max_new_tokens=max_new_tokens)
 
134
  if input_text == "mode":
135
  if llm_mode == "":
136
  yield "The mode is currently at Loki Default mode"
137
+ return
138
  else:
139
  yield f"The current mode: {llm_mode}"
140
+ return
141
 
142
  if input_text == "check cuda":
143
  cuda_info = check_cuda()
 
146
  if input_text == "switch to llama":
147
  llm_mode = input_text
148
  yield "Got it! Llama is now activate for your questions only πŸ¦™"
149
+ return
150
 
151
  if input_text == "switch to gpt-4o":
152
  llm_mode = input_text
153
  yield "Understood! GPT-4o is now hearing your responses only πŸ‘Ύ"
154
+ return
155
 
156
  if input_text == "switch to gpt-3.5-turbo":
157
  llm_mode = input_text
158
  yield "Done. GPT-3.5-turbo is ready for your questions! πŸƒ"
159
+ return
160
 
161
  if llm_mode == "switch to llama":
162
  streamer = llama_generation(input_text=input_text, history=history, temperature=temperature, max_new_tokens=max_new_tokens)