removed all return states and just did yield for bot_comms()
Browse files
app.py
CHANGED
@@ -191,24 +191,25 @@ async def bot_comms(input_text: str,
|
|
191 |
|
192 |
if input_text == "mode":
|
193 |
if llm_mode == "":
|
194 |
-
|
195 |
else:
|
196 |
-
|
197 |
|
198 |
if input_text == "check cuda":
|
199 |
-
|
|
|
200 |
|
201 |
if input_text == "switch to llama":
|
202 |
llm_mode = input_text
|
203 |
-
|
204 |
|
205 |
if input_text == "switch to gpt-4o":
|
206 |
llm_mode = input_text
|
207 |
-
|
208 |
|
209 |
if input_text == "switch to gpt-3.5-turbo":
|
210 |
llm_mode = input_text
|
211 |
-
|
212 |
|
213 |
if llm_mode == "switch to llama":
|
214 |
streamer = loki_generation(input_text=input_text,
|
|
|
191 |
|
192 |
if input_text == "mode":
|
193 |
if llm_mode == "":
|
194 |
+
yield "The mode is currently at Loki Default mode"
|
195 |
else:
|
196 |
+
yield f"The current mode: {llm_mode}"
|
197 |
|
198 |
if input_text == "check cuda":
|
199 |
+
cuda_info = check_cuda()
|
200 |
+
yield cuda_info
|
201 |
|
202 |
if input_text == "switch to llama":
|
203 |
llm_mode = input_text
|
204 |
+
yield "Got it! Llama is now activate for your questions only 🦙"
|
205 |
|
206 |
if input_text == "switch to gpt-4o":
|
207 |
llm_mode = input_text
|
208 |
+
yield "Understood! GPT-4o is now hearing your responses only 👾"
|
209 |
|
210 |
if input_text == "switch to gpt-3.5-turbo":
|
211 |
llm_mode = input_text
|
212 |
+
yield "Done. GPT-3.5-turbo is ready for your questions! 👍"
|
213 |
|
214 |
if llm_mode == "switch to llama":
|
215 |
streamer = loki_generation(input_text=input_text,
|