Abbasid committed (verified)
Commit 3f109c4 · Parent(s): 325ec94

Update app.py

Files changed (1)
  1. app.py +114 -17
app.py CHANGED
@@ -1,5 +1,4 @@
  from smolagents import CodeAgent, DuckDuckGoSearchTool, OpenAIServerModel, tool, FinalAnswerTool, VisitWebpageTool, GradioUI, LiteLLMModel
- from Gradio_UI import GradioUI
  import requests
  import pytz
  from typing import Optional, Tuple, Union, Any # Added Any
@@ -201,24 +200,122 @@ class HeightComparisonAgent(CodeAgent):
  # --- Instantiate the Subclassed Agent ---
  # IMPORTANT: Use the HeightComparisonAgent class, not CodeAgent directly.
  # Set verbosity_level=3 so the parent's run method (super().run) generates the verbose output.
+ # --- Instantiate the Agent ---
+ height_agent = None
+ initialization_error_message = None # <<< Make sure this line is BEFORE the if
+
  if llm_model is not None:
-     height_agent = HeightComparisonAgent(
-         tools=[DuckDuckGoSearchTool(), VisitWebpageTool(), parse_height_from_text, create_comparison_statement, FinalAnswerTool()],
-         model=llm_model,
-         verbosity_level=3, # <<< Crucial for GradioUI to see the steps from the parent run
-         max_steps=20, # Increased slightly just in case
-         # planning_interval=3, # Optional
-     )
+     try:
+         height_agent = HeightComparisonAgent(
+             tools=[DuckDuckGoSearchTool(), VisitWebpageTool(), parse_height_from_text, create_comparison_statement, FinalAnswerTool()],
+             model=llm_model,
+             verbosity_level=3, # <<< ESSENTIAL for capturing reasoning steps
+             max_steps=20,
+         )
+         print("--- HeightComparisonAgent initialized successfully. ---")
+     except Exception as e:
+         # Store the error if agent creation fails even with a model
+         initialization_error_message = f"ERROR: Failed to initialize HeightComparisonAgent: {e}\n{traceback.format_exc()}"
+         print(initialization_error_message)
+         height_agent = None # Ensure agent is None on error
  else:
-     print('Failed to Load LiteLLM Model')
+     # Store the error if the LLM model itself failed to initialize
+     initialization_error_message = (
+         "ERROR: Could not initialize any Language Model backend.\n\n"
+         f"Please check the Space logs (check the 'Logs' tab above the app).\n"
+         f"Verify that at least one of these secrets is correctly set in Space Settings -> Secrets:\n"
+         f"Also ensure necessary libraries are in requirements.txt."
+     )
+     print(initialization_error_message)
+     # height_agent is already None
 
- # --- Launch Gradio using GradioUI and the custom agent ---
- print("--- Starting Gradio Interface with GradioUI and HeightComparisonAgent ---")
+ # --- Wrapper Function to Run Agent and Capture Output ---
+ def run_agent_wrapper(query: str) -> Tuple[str, str]:
+     """
+     Runs the height_agent and captures its stdout (reasoning steps).
+     Returns (reasoning_log, final_answer).
+     """
+     # Access the global variables
+     global height_agent, initialization_error_message
+
+     if height_agent is None:
+         # If agent initialization failed, return the stored error message
+         return (initialization_error_message or "Agent not initialized (unknown error).",
+                 "Agent failed to initialize. See reasoning log for details.")
+
+     print(f"\n--- Running agent for query: '{query}' ---") # Log to console
+     log_stream = io.StringIO()
+     final_answer = "Agent execution did not complete." # Default message
+
+     try:
+         # Redirect stdout to capture prints from agent.run() (due to verbosity=3)
+         with contextlib.redirect_stdout(log_stream):
+             # Make sure to call the run method of the specific agent instance
+             final_answer = height_agent.run(query) # Pass the raw query
+             print("\n--- Agent execution finished successfully. ---") # Add marker to log
+     except Exception as e:
+         print(f"\n--- Error during agent execution wrapper: {e} ---") # Log to console
+         # Print exception details *into the captured log*
+         print("\n\n******** ERROR DURING EXECUTION ********\n", file=log_stream)
+         traceback.print_exc(file=log_stream)
+         final_answer = f"An error occurred during processing. See reasoning log. Error: {e}"
+     finally:
+         reasoning_log = log_stream.getvalue()
+         log_stream.close()
+         print("--- Finished capturing stdout. ---") # Log to console
+
+     return reasoning_log, final_answer
+ # --- Build Gradio Interface Manually with gr.Blocks ---
+ print("--- Building Gradio Interface with gr.Blocks ---")
+
+ # Make sure theme is applied correctly if desired
+ # theme = gr.themes.Default() # Or another theme
+ # with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo:
+ with gr.Blocks(css="footer {visibility: hidden}") as demo: # Hides the default footer
+     gr.Markdown("# Height Comparison Agent")
+     gr.Markdown("Enter your height (e.g., '180 cm', '5ft 11in') to find characters/figures of similar height.")
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             query_input = gr.Textbox(
+                 label="Your Query (including height)",
+                 placeholder="e.g., I am 175cm tall",
+                 lines=2 # Allow slightly more room for input
+             )
+             submit_button = gr.Button("Compare Heights", variant="primary")
+         with gr.Column(scale=2):
+             final_answer_output = gr.Textbox(
+                 label="Final Answer",
+                 interactive=False,
+                 lines=5
+             )
+
+     gr.Markdown("## Agent Reasoning Steps")
+     # Use gr.Code for better formatting of logs, especially if they contain code blocks
+     reasoning_output = gr.Code(
+         label="Reasoning Log",
+         language="text", # Use 'markdown' if logs might contain markdown
+         interactive=False,
+         lines=20
+     )
 
- # GradioUI will call height_agent.run(user_input)
- # Our overridden run method will preprocess the input and call super().run(detailed_task)
- # GradioUI should then display the thinking steps from super().run()
- ui = GradioUI(agent=height_agent)
+     # Link components: When button is clicked, call wrapper, update outputs
+     submit_button.click(
+         fn=run_agent_wrapper, # Function to call
+         inputs=[query_input], # Component(s) providing input
+         outputs=[reasoning_output, final_answer_output] # Components to update
+         # Ensure the order matches the return tuple from run_agent_wrapper: (log, answer)
+     )
 
- # Launch the UI
- ui.launch() # Use debug=True for Gradio logs
+     # Add an example input
+     gr.Examples(
+         examples=[
+             "I am 188cm tall",
+             "How tall is someone who is 5 foot 8 inches?",
+             "My height is 1.65m",
+         ],
+         inputs=query_input
+     )
+ # --- Launch Gradio ---
+ print("--- Launching Gradio demo ---")
+ demo.launch(ssr=False) # ssr=False recommended, share=True not needed for Spaces
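
The core pattern in this change is to run the agent while redirecting stdout into an io.StringIO, then return the captured log together with the final answer as a tuple whose order matches the Gradio outputs list. Below is a minimal, self-contained sketch of that pattern; fake_agent_run is a hypothetical stand-in for height_agent.run() and is not part of this commit.

import contextlib
import io
import traceback
from typing import Tuple

import gradio as gr


def fake_agent_run(query: str) -> str:
    """Hypothetical stand-in for height_agent.run(); prints like a verbose agent."""
    print(f"Step 1: parsed query '{query}'")
    print("Step 2: searched for figures of similar height")
    return "Demo answer: about the height of a door frame."


def run_with_captured_stdout(query: str) -> Tuple[str, str]:
    """Run the stand-in agent and return (reasoning_log, final_answer)."""
    log_stream = io.StringIO()
    final_answer = "Agent execution did not complete."
    try:
        # Anything printed inside this block goes into log_stream instead of the console.
        with contextlib.redirect_stdout(log_stream):
            final_answer = fake_agent_run(query)
    except Exception as e:
        traceback.print_exc(file=log_stream)
        final_answer = f"An error occurred: {e}"
    return log_stream.getvalue(), final_answer


with gr.Blocks() as demo:
    query_input = gr.Textbox(label="Query")
    run_button = gr.Button("Run")
    log_output = gr.Textbox(label="Reasoning Log", lines=10)
    answer_output = gr.Textbox(label="Final Answer")
    # The outputs list order must match the returned (log, answer) tuple.
    run_button.click(fn=run_with_captured_stdout,
                     inputs=[query_input],
                     outputs=[log_output, answer_output])

if __name__ == "__main__":
    demo.launch()

One consequence of this design is that the reasoning log only becomes visible after the run completes, since the captured text is returned in one piece rather than streamed.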