Update app.py
app.py CHANGED
@@ -2,7 +2,7 @@ from smolagents import CodeAgent, DuckDuckGoSearchTool, OpenAIServerModel, tool,
 import gradio as gr
 import requests
 import pytz
-from typing import Optional, Tuple, Union, Any, List, Dict
+from typing import Optional, Tuple, Union, Any, List, Dict
 import re
 # from google.colab import userdata # Assuming Colab environment
 import io
@@ -273,96 +273,36 @@ def agent_thread_func(agent, query, log_queue, result_queue):
 
 # Generator function for Gradio streaming
 # REMOVED the return type hint -> Iterator[...]
-def run_agent_wrapper(query: str):
+# --- REPLACE the current run_agent_wrapper function WITH THIS ---
+def run_agent_wrapper(query: str) -> Tuple[str, str]:
     """
-    Runs the ...
-
-    Yields: (chatbot_history, final_answer_status)
+    Runs the height_agent synchronously and captures its stdout (reasoning steps).
+    Returns (reasoning_log, final_answer). NO STREAMING.
     """
     if height_agent is None:
         error_msg = initialization_error_message or "Agent not initialized."
-
-        yield ([{"role": "assistant", "content": error_msg}], "Error: Agent not initialized.")
-        return # Stop the generator
-
-    log_queue = queue.Queue()
-    result_queue = queue.Queue()
-    # History will be a list of dictionaries: [{"role": "assistant", "content": "..."}]
-    # We'll just use one dictionary and update its content for the streaming log
-    chatbot_history = []
-    current_log_message = "" # Accumulate lines into one message block
-    final_answer = "⏳ Running..." # Initial status
-
-    # Initial yield to clear previous state and show "Running"
-    # Yield empty history initially, or a starting message
-    yield ([], final_answer)
-
-    # Start the agent thread
-    thread = threading.Thread(
-        target=agent_thread_func,
-        args=(height_agent, query, log_queue, result_queue)
-    )
-    thread.start()
-
⋯ (old lines 307–324 hidden) ⋯
-        except queue.Empty:
-            # No new message, brief pause to prevent busy-waiting
-            # Also check if the thread is still alive; if not, break (error case)
-            if not thread.is_alive() and result_queue.empty():
-                print("Warning: Agent thread finished unexpectedly without result.")
-                # Attempt to retrieve any remaining logs
-                try:
-                    while True: # Get all remaining logs
-                        log_line = log_queue.get_nowait()
-                        if log_line: current_log_message += log_line + "\n"
-                        else: break # Should not happen if None was already processed, but safety
-                except queue.Empty:
-                    pass # No more logs
-                current_log_message += "\nError: Agent stopped unexpectedly."
-                chatbot_history = [{"role": "assistant", "content": current_log_message}]
-                final_answer = "Error: Agent stopped unexpectedly."
-                yield (chatbot_history, final_answer)
-                return # Stop
-
-            time.sleep(0.1) # Pause briefly
-
-    # Agent thread has finished (log_queue received None)
-    thread.join() # Wait for the thread to fully terminate
-
-    # Get the final result or exception
-    final_result = result_queue.get()
-
-    if isinstance(final_result, Exception):
-        final_answer = f"Error during execution: {final_result}"
-        # Append error to the chatbot log
-        error_log = f"\n\n--- EXECUTION ERROR ---\n{final_result}"
-        current_log_message += error_log
-        chatbot_history = [{"role": "assistant", "content": current_log_message}]
-    else:
-        final_answer = final_result # This is the actual final answer string
-
-    # Final yield with the complete log and the final answer
-    # Ensure history is in the correct format before the final yield
-    chatbot_history = [{"role": "assistant", "content": current_log_message}]
-    yield (chatbot_history, final_answer)
-
+        return (error_msg, "Agent failed to initialize. See reasoning log.")
+
+    print(f"\n--- Running agent for query: '{query}' ---")
+    log_stream = io.StringIO()
+    final_answer = "Error during execution." # Default message
+
+    try:
+        # Redirect stdout to capture prints from agent.run()
+        with contextlib.redirect_stdout(log_stream):
+            # Run the agent directly (prints are captured)
+            final_answer = height_agent.run(query)
+            print("\n--- Agent execution finished. ---") # Add marker to log
+    except Exception as e:
+        print(f"\n--- Error during agent execution wrapper: {e} ---")
+        traceback.print_exc(file=log_stream) # Print exception to log stream
+        final_answer = f"An error occurred in the wrapper. See reasoning log. Error: {e}"
+    finally:
+        reasoning_log = log_stream.getvalue()
+        log_stream.close()
+        print("--- Finished capturing stdout. ---") # Log to console, not captured
+
+    return reasoning_log, final_answer
 
 # --- Build Gradio Interface Manually with gr.Blocks ---
 print("--- Building Gradio Interface with gr.Blocks ---")
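Note on the hunk above: the replacement drops streaming entirely and leans on the standard-library stdout-capture idiom. A minimal sketch of that idiom in isolation, assuming nothing from app.py (noisy_step is an illustrative stand-in for height_agent.run):

import contextlib
import io

def noisy_step() -> str:
    print("step 1: searching...")    # would normally go to the console
    print("step 2: summarizing...")
    return "42"

log_stream = io.StringIO()
with contextlib.redirect_stdout(log_stream):  # prints inside the block land in log_stream
    answer = noisy_step()

print(log_stream.getvalue())  # the captured "reasoning log"
print("answer:", answer)

One caveat: redirect_stdout swaps sys.stdout for the whole process while the block runs, so prints from any concurrent thread are captured too.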
@@ -386,18 +326,21 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
     gr.Markdown("## Agent Reasoning Steps")
     # --- CHANGE THIS ---
     # reasoning_output = gr.Code(label="Reasoning Log", language="markdown", interactive=False, lines=20)
-    reasoning_output = gr.Chatbot(
+    # --- REPLACE Chatbot definition WITH THIS ---
+    reasoning_output = gr.Code(
         label="Reasoning Log",
⋯ (old lines 391–393 hidden) ⋯
+        language="markdown", # Use markdown for good text/code display
+        interactive=False,
+        lines=20
+    )
+    # --- END OF REPLACEMENT ---
 
 
     # Link components - ensure outputs match the function's yield tuple order
     submit_button.click(
         fn=run_agent_wrapper,
         inputs=query_input,
-        outputs=[
+        outputs=[reasoning_output, final_answer_output]
     )
     # --- END OF CHANGE ---
 
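Note on the wiring above: with the synchronous wrapper, the click handler is a plain function whose (reasoning_log, final_answer) return tuple maps one-to-one onto the two output components. A minimal self-contained sketch of the same wiring, with echo logic standing in for the real agent (final_answer_output is assumed to be a Textbox; it is not shown in this hunk):

import gradio as gr

def run_agent_wrapper(query: str):
    # Stand-in for the real agent call; returns (reasoning_log, final_answer).
    log = f"Thought: the user asked {query!r}\nAction: answer directly"
    return log, f"Echo: {query}"

with gr.Blocks() as demo:
    query_input = gr.Textbox(label="Query")
    submit_button = gr.Button("Run")
    reasoning_output = gr.Code(label="Reasoning Log", language="markdown",
                               interactive=False, lines=20)
    final_answer_output = gr.Textbox(label="Final Answer")

    # The outputs list must match the function's return order.
    submit_button.click(fn=run_agent_wrapper,
                        inputs=query_input,
                        outputs=[reasoning_output, final_answer_output])

if __name__ == "__main__":
    demo.launch()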
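What this commit gives up is live streaming of the reasoning log. The removed generator used the classic worker-thread-plus-queue pattern, which Gradio supports through generator handlers; a minimal sketch of that pattern on its own (slow_agent is illustrative, not the app's agent):

import queue
import threading
import time

def slow_agent(log_queue: queue.Queue, result_queue: queue.Queue) -> None:
    for i in range(3):
        log_queue.put(f"step {i}")   # emit a log line
        time.sleep(0.2)              # pretend to work
    log_queue.put(None)              # sentinel: no more logs
    result_queue.put("final answer")

def stream_logs():
    log_queue, result_queue = queue.Queue(), queue.Queue()
    thread = threading.Thread(target=slow_agent, args=(log_queue, result_queue))
    thread.start()
    log_so_far = ""
    while True:
        line = log_queue.get()       # block until the worker produces a line
        if line is None:             # sentinel reached: worker is done
            break
        log_so_far += line + "\n"
        yield log_so_far, "⏳ Running..."   # partial log, interim status
    thread.join()
    yield log_so_far, result_queue.get()   # full log, final answer

if __name__ == "__main__":
    for log, status in stream_logs():
        print(status, "|", log.strip().splitlines()[-1])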