sandz7 commited on
Commit
8987295
·
1 Parent(s): 65f60f3

Adds asyncio handling for the StopAsyncIteration bug

Browse files
Files changed (1) hide show
  1. app.py +22 -4
app.py CHANGED
@@ -5,6 +5,7 @@ from huggingface_hub import login
5
  import os
6
  from threading import Thread
7
  from openai import OpenAI
 
8
 
9
  TOKEN = os.getenv('HF_AUTH_TOKEN')
10
  login(token=TOKEN,
@@ -171,10 +172,17 @@ def check_cuda():
171
  first_time = True
172
  llm_mode = ""
173
 
174
- def bot_comms(input_text: str,
175
- history: list,
176
- temperature: float,
177
- max_new_tokens: int):
 
 
 
 
 
 
 
178
  """
179
  The connection between gradio and the LLM's
180
  """
@@ -248,6 +256,16 @@ def bot_comms(input_text: str,
248
  outputs.append(text)
249
  yield "".join(outputs)
250
 
 
 
 
 
 
 
 
 
 
 
251
  chatbot=gr.Chatbot(height=600, label="Loki AI")
252
 
253
  with gr.Blocks(fill_height=True) as demo:
 
5
  import os
6
  from threading import Thread
7
  from openai import OpenAI
8
+ import asyncio
9
 
10
  TOKEN = os.getenv('HF_AUTH_TOKEN')
11
  login(token=TOKEN,
 
172
  first_time = True
173
  llm_mode = ""
174
 
# Async generator function (demo driver for the asyncio integration).
async def async_generator(count: int = 5, delay: float = 1):
    """Yield the integers ``0 .. count-1``, pausing ``delay`` seconds between each.

    The iteration count and sleep interval are parameterized (with
    backward-compatible defaults of 5 and 1, matching the original
    hard-coded values) so callers and tests can shorten the run.

    Args:
        count: Number of values to yield.
        delay: Seconds to await before each value.

    Yields:
        int: The next counter value.
    """
    for i in range(count):
        # Simulate an asynchronous operation.
        await asyncio.sleep(delay)
        yield i
181
+
182
+ async def bot_comms(input_text: str,
183
+ history: list,
184
+ temperature: float,
185
+ max_new_tokens: int):
186
  """
187
  The connection between gradio and the LLM's
188
  """
 
256
  outputs.append(text)
257
  yield "".join(outputs)
258
 
# Integration in your existing code: drive the demo async generator, then
# exercise bot_comms once with example arguments.
async def main():
    """Async entry point: print the demo values, then drain one bot_comms call."""
    async for value in async_generator():
        print(value)
    # BUG FIX: bot_comms is an `async def` containing `yield`, i.e. an async
    # *generator* function.  Calling it returns an async generator object,
    # which cannot be awaited — the original `await bot_comms(...)` raised
    # "TypeError: object async_generator can't be used in 'await' expression".
    # Consume it with `async for` instead (chunks are discarded here, as the
    # original awaited-and-discarded call did).
    async for _ in bot_comms(input_text="example",
                             history=[],
                             temperature=0.5,
                             max_new_tokens=128):
        pass

# Run the async function once at import time, as before.
asyncio.run(main())
268
+
269
  chatbot=gr.Chatbot(height=600, label="Loki AI")
270
 
271
  with gr.Blocks(fill_height=True) as demo: