CultriX committed on
Commit
4b2e1da
·
verified ·
1 Parent(s): 39c710d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -55
app.py CHANGED
@@ -311,29 +311,47 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
311
  # -------------------- Process Generator and Human Input --------------------
312
  def process_conversation_generator(task_message: str, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> Generator[str, None, None]:
313
  """
314
- Wraps the conversation and yields log messages. Handles human input.
315
  """
316
  log_q: queue.Queue = queue.Queue()
317
 
318
- def run_conversation() -> None:
319
- asyncio.run(multi_agent_conversation(task_message, log_q, api_key, human_in_the_loop_event, human_input_queue))
320
-
321
- thread = threading.Thread(target=run_conversation)
322
- thread.start()
323
 
 
324
  final_result = None
325
- while thread.is_alive() or not log_q.empty():
326
  try:
327
- msg = log_q.get(timeout=0.1)
328
  if isinstance(msg, tuple) and msg[0] == "result":
329
  final_result = msg[1]
330
- yield "Conversation complete."
 
331
  else:
332
- yield msg
333
  except queue.Empty:
334
- continue
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
335
 
336
- thread.join()
337
  if final_result:
338
  conv_text = "\n=== Conversation ===\n"
339
  for entry in final_result:
@@ -372,48 +390,7 @@ def multi_agent_chat(message: str, history: List[Any], openai_api_key: str = Non
372
  human_in_the_loop_event = threading.Event()
373
  human_input_queue = queue.Queue() # Use a single queue for both requests and responses
374
 
375
- # Start the conversation in a separate thread
376
- conversation_thread = threading.Thread(
377
- target=lambda: asyncio.run(
378
- multi_agent_conversation(message, queue.Queue(), openai_api_key, human_in_the_loop_event, human_input_queue)
379
- )
380
- )
381
- conversation_thread.start()
382
-
383
-
384
- log_queue = queue.Queue() # Local log queue for this chat instance
385
- asyncio.run(multi_agent_conversation(message, log_queue, openai_api_key, human_in_the_loop_event, human_input_queue))
386
-
387
-
388
-
389
- while conversation_thread.is_alive() or not log_queue.empty() or human_in_the_loop_event.is_set():
390
- # Yield log messages
391
- try:
392
- log_message = log_queue.get_nowait() # Non-blocking get
393
- if isinstance(log_message, tuple) and log_message[0] == "result":
394
- final_result_text = "\n=== Conversation ===\n"
395
- for entry in log_message[1]:
396
- final_result_text+= f"[{entry['agent']}]: {entry['message']}\n\n"
397
- yield final_result_text
398
- else:
399
- yield log_message
400
- except queue.Empty:
401
- pass
402
-
403
- # Handle human feedback requests
404
- if human_in_the_loop_event.is_set():
405
- yield "Waiting for human feedback..."
406
- try:
407
- feedback_request = human_input_queue.get(timeout=0.1) # Get context for the request.
408
- human_interface = get_human_feedback(feedback_request) #Show context in the input box
409
- yield gr.Textbox.update(visible=False), gr.update(visible=True) # Show the human feedback interface.
410
- human_in_the_loop_event.wait() # Wait until the event is cleared
411
-
412
- except queue.Empty:
413
- pass
414
- await asyncio.sleep(0.1) # Prevent busy-waiting
415
-
416
- conversation_thread.join()
417
 
418
 
419
  # -------------------- Launch the Chatbot --------------------
@@ -437,4 +414,6 @@ dummy_iface = gr.Interface(lambda x:x, "textbox", "textbox")
437
 
438
  if __name__ == "__main__":
439
  demo = gr.TabbedInterface([iface, dummy_iface], ["Chatbot", "Dummy"])
440
- demo.launch(share=True)
 
 
 
311
  # -------------------- Process Generator and Human Input --------------------
312
  def process_conversation_generator(task_message: str, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> Generator[str, None, None]:
313
  """
314
+ Wraps the conversation, yields log messages, and handles human input within a single thread.
315
  """
316
  log_q: queue.Queue = queue.Queue()
317
 
318
+ # Run the multi-agent conversation *synchronously* within this function.
319
+ asyncio.run(multi_agent_conversation(task_message, log_q, api_key, human_in_the_loop_event, human_input_queue))
 
 
 
320
 
321
+ # Process the log queue and handle human-in-the-loop
322
  final_result = None
323
+ while True: # Loop indefinitely to handle multiple potential human feedback requests.
324
  try:
325
+ msg = log_q.get_nowait() # Non-blocking get from the log queue.
326
  if isinstance(msg, tuple) and msg[0] == "result":
327
  final_result = msg[1]
328
+ yield "Conversation complete." # Indicate completion.
329
+ break # Exit the loop after processing the final result.
330
  else:
331
+ yield msg # Yield the log message.
332
  except queue.Empty:
333
+ pass # No log message available, continue checking for human input.
334
+
335
+
336
+ if human_in_the_loop_event.is_set():
337
+ yield "Waiting for human feedback..." # Indicate waiting state.
338
+ try:
339
+ feedback_request = human_input_queue.get(
340
+ timeout=0.1) # Get the context/question for feedback.
341
+ human_interface = get_human_feedback(feedback_request)
342
+ yield gr.Textbox.update(visible=False), gr.update(visible=True)
343
+ human_feedback = human_input_queue.get(
344
+ timeout=300) # Wait (block) for human feedback, with a timeout.
345
+ human_in_the_loop_event.clear() # Reset the event after getting feedback.
346
+ yield gr.Textbox.update(visible=True), human_interface.close() # Hide feedback UI.
347
+
348
+
349
+ except queue.Empty:
350
+ pass
351
+ # Add a small sleep to avoid busy-waiting and reduce CPU usage.
352
+ time.sleep(0.1)
353
+
354
 
 
355
  if final_result:
356
  conv_text = "\n=== Conversation ===\n"
357
  for entry in final_result:
 
390
  human_in_the_loop_event = threading.Event()
391
  human_input_queue = queue.Queue() # Use a single queue for both requests and responses
392
 
393
+ yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
394
 
395
 
396
  # -------------------- Launch the Chatbot --------------------
 
414
 
415
if __name__ == "__main__":
    # `time` is used by process_conversation_generator's polling sleep
    # (time.sleep(0.1)). It must be imported BEFORE demo.launch(): launch()
    # blocks for the lifetime of the server, so an import placed after it
    # does not run while requests are being served, and time.sleep would
    # raise NameError. NOTE(review): ideally this belongs in the top-of-file
    # import block alongside asyncio/threading/queue.
    import time  # noqa: F401 (consumed by the generator at request time)

    # Two-tab UI: the real chatbot interface plus a dummy echo tab.
    demo = gr.TabbedInterface([iface, dummy_iface], ["Chatbot", "Dummy"])
    demo.launch(share=True)