Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,69 +1,220 @@
|
|
1 |
-
from smolagents import CodeAgent,DuckDuckGoSearchTool,
|
2 |
-
|
3 |
import requests
|
4 |
import pytz
|
5 |
-
import
|
6 |
-
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
-
from Gradio_UI import GradioUI
|
9 |
-
|
10 |
-
# Below is an example of a tool that does nothing. Amaze us with your creativity !
|
11 |
@tool
|
12 |
-
def
|
13 |
-
|
14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
Args:
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
18 |
"""
|
19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
@tool
|
22 |
-
def
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
24 |
Args:
|
25 |
-
|
|
|
|
|
|
|
|
|
|
|
26 |
"""
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
47 |
)
|
48 |
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
agent = CodeAgent(
|
57 |
-
model=model,
|
58 |
-
tools=[final_answer], ## add your tools here (don't remove final answer)
|
59 |
-
max_steps=6,
|
60 |
-
verbosity_level=1,
|
61 |
-
grammar=None,
|
62 |
-
planning_interval=None,
|
63 |
-
name=None,
|
64 |
-
description=None,
|
65 |
-
prompt_templates=prompt_templates
|
66 |
)
|
67 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
|
69 |
-
|
|
|
|
1 |
+
# NOTE: GradioUI was previously listed twice in this import; deduplicated.
from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    OpenAIServerModel,
    tool,
    FinalAnswerTool,
    VisitWebpageTool,
    GradioUI,
    LiteLLMModel,
)
import requests
import pytz
from typing import Optional, Tuple, Union, Any
import re
import io
import contextlib
import sys
import traceback

# google.colab only exists inside a Colab runtime. Guarding the import here is
# required: an unguarded `from google.colab import userdata` fails at import
# time outside Colab, before the downstream try/except fallback (which expects
# a NameError on `userdata`) ever gets a chance to run.
try:
    from google.colab import userdata  # Colab secret storage
except ImportError:
    pass  # not in Colab; API keys will be read from environment variables
|
12 |
|
|
|
|
|
|
|
13 |
@tool
def parse_height_from_text(
    text: str,
    prefer_units: str = "cm",
    max_expected: float = 1000.0
) -> Optional[float]:
    """
    Extract the FIRST valid height measurement from free text and convert it to centimeters.

    **Usage Workflow:**
    1. Apply this to the initial user query first to capture the user's height; keep the value.
    2. Later it can be reused on individual web-search snippets that mention a height
       (e.g., "Character X is 6'2\" tall").

    Args:
        text: Input text that may contain height measurements (user query or search snippet).
        prefer_units: Unit system ('cm', 'm', 'ft', 'in') assumed when the text gives no unit. Default 'cm'.
        max_expected: Upper sanity bound in cm; larger parsed values are discarded.

    Returns:
        float | None: The height in centimeters (rounded to 2 decimals) for the first
        match that passes the sanity check, or None when nothing valid is found.
    """
    height_pattern = r"""
        (?:^|\b|(?<=\s))(\d+\.?\d*)\s*(?:(cm|centi.*)|(m|meters?|metres)|(ft|feet|')|(in|inches?|"))\b
    """
    to_cm = {"cm": 1.0, "m": 100.0, "ft": 30.48, "in": 2.54}
    # Ordered substring markers that normalise the captured unit text to a canonical key.
    # Order matters: "cm" must be tested before the bare "m" marker.
    unit_markers = (
        ("cm", ["cm", "centi"]),
        ("m", ["m", "meter", "metre"]),
        ("ft", ["ft", "feet", "'"]),
        ("in", ["in", "inch", "\""]),
    )

    for hit in re.finditer(height_pattern, text, re.IGNORECASE | re.VERBOSE | re.UNICODE):
        try:
            magnitude = float(hit.group(1))
            # The first non-empty group after the number is whatever unit text matched.
            unit_text = next((g for g in hit.groups()[1:] if g), "").lower()
            unit = next(
                (canonical for canonical, markers in unit_markers
                 if any(m in unit_text for m in markers)),
                prefer_units,  # ambiguous/absent unit: fall back to caller's preference
            )
            centimeters = magnitude * to_cm[unit]
            # Sanity window: ignore nonsense like "0 cm" or "5000 cm".
            if 0.1 < centimeters < max_expected:
                return round(centimeters, 2)
        except (ValueError, KeyError, TypeError):
            continue  # malformed candidate; keep scanning
    return None
|
53 |
|
54 |
@tool
def create_comparison_statement(
    target: str,
    user_height: float,
    reference_height: float,
) -> str:
    """
    Build ONE human-readable height-comparison sentence. Example output:
    "👤 You're almost the same height as Sherlock Holmes! (185.0 cm vs 183.0 cm)"

    **Usage Workflow:**
    1. Call this *AFTER* a target name has been found, its height extracted,
       and validated (e.g., `if 50 < reference_height < 250:`).
    2. Call once per validated target to include.
    3. Collect the returned strings and combine them for the final answer.

    Args:
        target: Name of the person/character/object being compared against (from search results).
        user_height: The user's height in centimeters.
        reference_height: The target's height in centimeters (parsed and VALIDATED from search results).

    Returns:
        str: One formatted comparison line describing how close the two heights are.
    """
    # Proximity thresholds (cm): within 1 cm counts as "exactly", within 4 cm as "slightly".
    EXACT_CM = 1.0
    CLOSE_CM = 4.0

    delta = user_height - reference_height  # positive => user is taller
    gap = abs(delta)
    direction = "taller" if delta > 0 else "shorter"

    if gap <= EXACT_CM:
        phrase = f"You're exactly the same height as {target}!"
    elif gap <= CLOSE_CM:
        phrase = f"You're slightly {direction} than {target}!"
    else:
        phrase = f"You're noticeably {direction} than {target}."

    return f"👤 {phrase} ({user_height:.1f} cm vs {reference_height:.1f} cm)"
|
104 |
+
|
105 |
+
|
106 |
+
# --- Instantiate Model ---
def _get_api_key(name: str) -> str:
    """Look up an API key in Colab userdata, falling back to environment variables.

    Args:
        name: Secret name, e.g. "OR_TOKEN" or "GEM_TOKEN".

    Returns:
        The non-empty key value.

    Raises:
        ValueError: when the key is missing from both Colab userdata and the environment.
    """
    key = None
    try:
        key = userdata.get(name)  # NameError when not running in Colab
    except NameError:
        pass
    if not key:
        import os
        key = os.environ.get(name)
    if not key:
        raise ValueError(f"API key {name} not found in Colab userdata or environment variables.")
    return key


OR_API_KEY = _get_api_key("OR_TOKEN")

model = OpenAIServerModel(
    model_id='qwen/qwen-2.5-coder-32b-instruct:free',
    api_base='https://openrouter.ai/api/v1',
    # BUG FIX: this previously called userdata.get("OR_TOKEN") directly, which
    # raised NameError outside Colab and silently ignored the OR_API_KEY
    # fallback computed above.
    api_key=OR_API_KEY,
)


# Replace all calls to HfApiModel
llm_model = LiteLLMModel(
    # Other model names: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models
    # The "gemini/" prefix is required by LiteLLM's provider routing.
    model_id="gemini/gemini-2.0-flash",
    api_key=_get_api_key("GEM_TOKEN"),  # same Colab/env fallback as OR_TOKEN
    max_tokens=8192
)
|
129 |
|
130 |
+
# --- Task Generation Function (No change needed here) ---
|
131 |
+
# It generates the *instructions* for the agent run
|
132 |
+
def create_height_comparison_task(user_query: str) -> str:
    """Wrap the raw user query in detailed agent instructions.

    The returned prompt pushes the agent toward diverse searches and robust
    height parsing/validation before composing comparisons.

    Args:
        user_query: The raw query typed by the user.

    Returns:
        str: The full instruction prompt with the (quote-escaped) query embedded.
    """
    # Escape single quotes so the query can sit inside single-quoted spans below.
    safe_query = user_query.replace("'", "\\'")

    return f"""
    TASK: Analyze the user query '{safe_query}' and perform the following steps to find height comparisons with **diverse figures (people, characters)**:

    1. **Parse User Height:** Use `parse_height_from_text` on the user query ('{safe_query}') to get the user's height in cm. Print and store it. If none found, use `final_answer` to ask for clarification like "Please provide your height clearly (e.g., '180 cm', '5 ft 11 in').".
    2. **Web Search (Diverse Queries):** If height found, use `web_search` to find **fictional characters, historical figures, scientists, artists, athletes, and other interesting people** of similar height. Formulate 2-3 specific queries using the user's height in cm (e.g., if user height is 180cm, search for `"historical figures 180 cm tall"`, `"celebrities around 180cm height"`, `"fictional characters exactly 180 cm"`). Print the search results clearly.
    3. **Extract & Validate from Search Results:** CRITICAL STEP. Read the `web_search` Observation snippets carefully.
        * Identify potential (Name, Height String) pairs. Prioritize clear mentions of height linked to a name.
        * For each potential pair:
            * Use `parse_height_from_text` on the relevant part of the search snippet string containing the height info. Store the result in cm (e.g., `extracted_cm`).
            * **Validate using Python code:** Check if `extracted_cm` is NOT `None` AND if it's within a reasonable human range (e.g., `if extracted_cm is not None and 50 < extracted_cm < 250:`).
        * Collect valid (Name, Validated Height cm) pairs into a Python list. Print this list. Aim for diverse examples.
    4. **Generate Multiple Comparisons:** Check the validated matches list.
        * If empty after searching, use `final_answer` stating no relevant matches were found for that height.
        * If matches exist, select **up to 3-4 diverse ones**.
        * Create an empty list `comparison_outputs = []`.
        * **Loop** through the selected matches. For each (name, ref_height_cm), call `create_comparison_statement(target=name, user_height=USER_HEIGHT_CM, reference_height=ref_height_cm)`. Append the resulting string to `comparison_outputs`.
    5. **Final Answer:** Combine the generated strings from `comparison_outputs` into a single response (e.g., separated by newlines: `"\\n".join(comparison_outputs)`). Add a brief introductory sentence like "Here are some figures with similar heights:". Return the complete message using `final_answer`.

    Follow Thought-Code-Observation meticulously. Handle `None` returns from `parse_height_from_text` gracefully in your Python code logic. Use the tools as described in their docstrings.
    """
|
157 |
+
|
158 |
+
|
159 |
+
|
160 |
+
# --- Define the Subclassed Agent ---
|
161 |
+
class HeightComparisonAgent(CodeAgent):
    """CodeAgent subclass that rewrites the incoming query before execution.

    The raw user query handed to run() is expanded into a detailed task via
    create_height_comparison_task, and the expanded task is executed by the
    parent CodeAgent.run. Because the parent run does the real work, GradioUI
    observes the execution of the *detailed* task.
    """

    def run(self, task: str, **kwargs: Any) -> str:
        """Expand the raw user query into a detailed task and delegate to CodeAgent.run.

        Args:
            task: The raw user query (as delivered by GradioUI).
            **kwargs: Forwarded unchanged to CodeAgent.run.

        Returns:
            The agent's final answer, or a user-facing error message on failure.
        """
        query = task  # GradioUI passes the raw user input as `task`
        print(f"[HeightComparisonAgent] Intercepted run call with user query: '{query}'")

        # Reject empty / whitespace-only input before spending any model calls.
        if not query or not query.strip():
            return "Please enter a valid query."

        # Build the detailed instruction prompt from the raw query.
        expanded_task = create_height_comparison_task(query)
        print(f"[HeightComparisonAgent] Generated detailed task (first 200 chars): {expanded_task[:200]}...")

        # Delegate to the parent implementation — this is what GradioUI monitors.
        print("[HeightComparisonAgent] Calling super().run() with the detailed task...")
        try:
            result = super().run(task=expanded_task, **kwargs)
            print("[HeightComparisonAgent] super().run() finished.")
            return result
        except Exception as err:
            # Surface a friendly message instead of crashing the UI; keep the
            # full traceback in the logs for debugging.
            print(f"[HeightComparisonAgent] Error during super().run(): {err}")
            traceback.print_exc()
            return f"An error occurred while processing your request: {err}"
|
198 |
+
|
199 |
+
|
200 |
+
# --- Instantiate the Subclassed Agent ---
# IMPORTANT: Use the HeightComparisonAgent class, not CodeAgent directly.
# Set verbosity_level=3 so the parent's run method (super().run) generates the verbose output.
# Tools: web search + page visits for finding reference heights, the two custom
# parsing/formatting tools above, and FinalAnswerTool so the agent can terminate.
height_agent = HeightComparisonAgent(
    tools=[DuckDuckGoSearchTool(), VisitWebpageTool(), parse_height_from_text, create_comparison_statement, FinalAnswerTool()],
    model=llm_model,
    verbosity_level=3, # <<< Crucial for GradioUI to see the steps from the parent run
    max_steps=20, # Increased slightly just in case
    # planning_interval=3, # Optional
)

# --- Launch Gradio using GradioUI and the custom agent ---
print("--- Starting Gradio Interface with GradioUI and HeightComparisonAgent ---")

# GradioUI will call height_agent.run(user_input)
# Our overridden run method will preprocess the input and call super().run(detailed_task)
# GradioUI should then display the thinking steps from super().run()
ui = GradioUI(agent=height_agent)

# Launch the UI (blocks here serving requests until interrupted)
ui.launch() # Use debug=True for Gradio logs
|