jairo commited on
Commit
7a87ce1
·
1 Parent(s): ae7a494

my first ai agent

Browse files
.gitignore ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ *.pyw
6
+ *.pyz
7
+
8
+ .venv/
9
+ .gradio/
Gradio_UI.py CHANGED
@@ -19,21 +19,26 @@ import re
19
  import shutil
20
  from typing import Optional
21
 
22
- from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
 
 
 
 
 
23
  from smolagents.agents import ActionStep, MultiStepAgent
24
  from smolagents.memory import MemoryStep
25
  from smolagents.utils import _is_package_available
26
 
27
 
28
- def pull_messages_from_step(
29
- step_log: MemoryStep,
30
- ):
31
  """Extract ChatMessage objects from agent steps with proper nesting"""
32
  import gradio as gr
33
 
34
  if isinstance(step_log, ActionStep):
35
  # Output the step number
36
- step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
 
 
37
  yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
38
 
39
  # First yield the thought/reasoning from the LLM
@@ -41,9 +46,15 @@ def pull_messages_from_step(
41
  # Clean up the LLM output
42
  model_output = step_log.model_output.strip()
43
  # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
44
- model_output = re.sub(r"```\s*<end_code>", "```", model_output) # handles ```<end_code>
45
- model_output = re.sub(r"<end_code>\s*```", "```", model_output) # handles <end_code>```
46
- model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output) # handles ```\n<end_code>
 
 
 
 
 
 
47
  model_output = model_output.strip()
48
  yield gr.ChatMessage(role="assistant", content=model_output)
49
 
@@ -63,8 +74,12 @@ def pull_messages_from_step(
63
 
64
  if used_code:
65
  # Clean up the content by removing any end code tags
66
- content = re.sub(r"```.*?\n", "", content) # Remove existing code blocks
67
- content = re.sub(r"\s*<end_code>\s*", "", content) # Remove end_code tags
 
 
 
 
68
  content = content.strip()
69
  if not content.startswith("```python"):
70
  content = f"```python\n{content}\n```"
@@ -90,7 +105,11 @@ def pull_messages_from_step(
90
  yield gr.ChatMessage(
91
  role="assistant",
92
  content=f"{log_content}",
93
- metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
 
 
 
 
94
  )
95
 
96
  # Nesting any errors under the tool call
@@ -98,7 +117,11 @@ def pull_messages_from_step(
98
  yield gr.ChatMessage(
99
  role="assistant",
100
  content=str(step_log.error),
101
- metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
 
 
 
 
102
  )
103
 
104
  # Update parent message metadata to done status without yielding a new message
@@ -106,17 +129,25 @@ def pull_messages_from_step(
106
 
107
  # Handle standalone errors but not from tool calls
108
  elif hasattr(step_log, "error") and step_log.error is not None:
109
- yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
 
 
 
 
110
 
111
  # Calculate duration and token information
112
  step_footnote = f"{step_number}"
113
- if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
114
- token_str = (
115
- f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
116
- )
117
  step_footnote += token_str
118
  if hasattr(step_log, "duration"):
119
- step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
 
 
 
 
120
  step_footnote += step_duration
121
  step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
122
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
@@ -139,7 +170,9 @@ def stream_to_gradio(
139
  total_input_tokens = 0
140
  total_output_tokens = 0
141
 
142
- for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
 
 
143
  # Track tokens if model provides them
144
  if hasattr(agent.model, "last_input_token_count"):
145
  total_input_tokens += agent.model.last_input_token_count
@@ -172,7 +205,9 @@ def stream_to_gradio(
172
  content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
173
  )
174
  else:
175
- yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
 
 
176
 
177
 
178
  class GradioUI:
@@ -242,10 +277,14 @@ class GradioUI:
242
  sanitized_name = "".join(sanitized_name)
243
 
244
  # Save the uploaded file to the specified folder
245
- file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
 
 
246
  shutil.copy(file.name, file_path)
247
 
248
- return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
 
 
249
 
250
  def log_user_message(self, text_input, file_uploads_log):
251
  return (
@@ -277,7 +316,9 @@ class GradioUI:
277
  # If an upload folder is provided, enable the upload feature
278
  if self.file_upload_folder is not None:
279
  upload_file = gr.File(label="Upload a file")
280
- upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
 
 
281
  upload_file.change(
282
  self.upload_file,
283
  [upload_file, file_uploads_log],
@@ -293,4 +334,4 @@ class GradioUI:
293
  demo.launch(debug=True, share=True, **kwargs)
294
 
295
 
296
- __all__ = ["stream_to_gradio", "GradioUI"]
 
19
  import shutil
20
  from typing import Optional
21
 
22
+ from smolagents.agent_types import (
23
+ AgentAudio,
24
+ AgentImage,
25
+ AgentText,
26
+ handle_agent_output_types,
27
+ )
28
  from smolagents.agents import ActionStep, MultiStepAgent
29
  from smolagents.memory import MemoryStep
30
  from smolagents.utils import _is_package_available
31
 
32
 
33
+ def pull_messages_from_step(step_log: MemoryStep):
 
 
34
  """Extract ChatMessage objects from agent steps with proper nesting"""
35
  import gradio as gr
36
 
37
  if isinstance(step_log, ActionStep):
38
  # Output the step number
39
+ step_number = (
40
+ f"Step {step_log.step_number}" if step_log.step_number is not None else ""
41
+ )
42
  yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
43
 
44
  # First yield the thought/reasoning from the LLM
 
46
  # Clean up the LLM output
47
  model_output = step_log.model_output.strip()
48
  # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
49
+ model_output = re.sub(
50
+ r"```\s*<end_code>", "```", model_output
51
+ ) # handles ```<end_code>
52
+ model_output = re.sub(
53
+ r"<end_code>\s*```", "```", model_output
54
+ ) # handles <end_code>```
55
+ model_output = re.sub(
56
+ r"```\s*\n\s*<end_code>", "```", model_output
57
+ ) # handles ```\n<end_code>
58
  model_output = model_output.strip()
59
  yield gr.ChatMessage(role="assistant", content=model_output)
60
 
 
74
 
75
  if used_code:
76
  # Clean up the content by removing any end code tags
77
+ content = re.sub(
78
+ r"```.*?\n", "", content
79
+ ) # Remove existing code blocks
80
+ content = re.sub(
81
+ r"\s*<end_code>\s*", "", content
82
+ ) # Remove end_code tags
83
  content = content.strip()
84
  if not content.startswith("```python"):
85
  content = f"```python\n{content}\n```"
 
105
  yield gr.ChatMessage(
106
  role="assistant",
107
  content=f"{log_content}",
108
+ metadata={
109
+ "title": "📝 Execution Logs",
110
+ "parent_id": parent_id,
111
+ "status": "done",
112
+ },
113
  )
114
 
115
  # Nesting any errors under the tool call
 
117
  yield gr.ChatMessage(
118
  role="assistant",
119
  content=str(step_log.error),
120
+ metadata={
121
+ "title": "💥 Error",
122
+ "parent_id": parent_id,
123
+ "status": "done",
124
+ },
125
  )
126
 
127
  # Update parent message metadata to done status without yielding a new message
 
129
 
130
  # Handle standalone errors but not from tool calls
131
  elif hasattr(step_log, "error") and step_log.error is not None:
132
+ yield gr.ChatMessage(
133
+ role="assistant",
134
+ content=str(step_log.error),
135
+ metadata={"title": "💥 Error"},
136
+ )
137
 
138
  # Calculate duration and token information
139
  step_footnote = f"{step_number}"
140
+ if hasattr(step_log, "input_token_count") and hasattr(
141
+ step_log, "output_token_count"
142
+ ):
143
+ token_str = f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
144
  step_footnote += token_str
145
  if hasattr(step_log, "duration"):
146
+ step_duration = (
147
+ f" | Duration: {round(float(step_log.duration), 2)}"
148
+ if step_log.duration
149
+ else None
150
+ )
151
  step_footnote += step_duration
152
  step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
153
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
 
170
  total_input_tokens = 0
171
  total_output_tokens = 0
172
 
173
+ for step_log in agent.run(
174
+ task, stream=True, reset=reset_agent_memory, additional_args=additional_args
175
+ ):
176
  # Track tokens if model provides them
177
  if hasattr(agent.model, "last_input_token_count"):
178
  total_input_tokens += agent.model.last_input_token_count
 
205
  content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
206
  )
207
  else:
208
+ yield gr.ChatMessage(
209
+ role="assistant", content=f"**Final answer:** {str(final_answer)}"
210
+ )
211
 
212
 
213
  class GradioUI:
 
277
  sanitized_name = "".join(sanitized_name)
278
 
279
  # Save the uploaded file to the specified folder
280
+ file_path = os.path.join(
281
+ self.file_upload_folder, os.path.basename(sanitized_name)
282
+ )
283
  shutil.copy(file.name, file_path)
284
 
285
+ return gr.Textbox(
286
+ f"File uploaded: {file_path}", visible=True
287
+ ), file_uploads_log + [file_path]
288
 
289
  def log_user_message(self, text_input, file_uploads_log):
290
  return (
 
316
  # If an upload folder is provided, enable the upload feature
317
  if self.file_upload_folder is not None:
318
  upload_file = gr.File(label="Upload a file")
319
+ upload_status = gr.Textbox(
320
+ label="Upload Status", interactive=False, visible=False
321
+ )
322
  upload_file.change(
323
  self.upload_file,
324
  [upload_file, file_uploads_log],
 
334
  demo.launch(debug=True, share=True, **kwargs)
335
 
336
 
337
+ __all__ = ["stream_to_gradio", "GradioUI"]
agent.json CHANGED
@@ -1,8 +1,8 @@
1
  {
2
  "tools": [
3
- "web_search",
4
- "visit_webpage",
5
- "final_answer"
6
  ],
7
  "model": {
8
  "class": "HfApiModel",
@@ -48,6 +48,7 @@
48
  "queue",
49
  "time",
50
  "collections",
51
- "re"
 
52
  ]
53
  }
 
1
  {
2
  "tools": [
3
+ "final_answer",
4
+ "get_current_time_in_timezone",
5
+ "get_news_from_place"
6
  ],
7
  "model": {
8
  "class": "HfApiModel",
 
48
  "queue",
49
  "time",
50
  "collections",
51
+ "re",
52
+ "pytz"
53
  ]
54
  }
app.py CHANGED
@@ -1,22 +1,28 @@
1
- from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
2
  import datetime
3
- import requests
4
  import pytz
5
  import yaml
6
- from tools.final_answer import FinalAnswerTool
7
 
8
  from Gradio_UI import GradioUI
 
 
 
9
 
10
- # Below is an example of a tool that does nothing. Amaze us with your creativity !
11
  @tool
12
- def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
13
- #Keep this format for the description / args / args description but feel free to modify the tool
14
- """A tool that does nothing yet
 
15
  Args:
16
- arg1: the first argument
17
- arg2: the second argument
18
  """
19
- return "What magic will you build ?"
 
 
 
 
 
20
 
21
  @tool
22
  def get_current_time_in_timezone(timezone: str) -> str:
@@ -25,9 +31,7 @@ def get_current_time_in_timezone(timezone: str) -> str:
25
  timezone: A string representing a valid timezone (e.g., 'America/New_York').
26
  """
27
  try:
28
- # Create timezone object
29
  tz = pytz.timezone(timezone)
30
- # Get current time in that timezone
31
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
32
  return f"The current local time in {timezone} is: {local_time}"
33
  except Exception as e:
@@ -36,34 +40,27 @@ def get_current_time_in_timezone(timezone: str) -> str:
36
 
37
  final_answer = FinalAnswerTool()
38
 
39
- # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
40
- # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
41
-
42
  model = HfApiModel(
43
- max_tokens=2096,
44
- temperature=0.5,
45
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
46
- custom_role_conversions=None,
47
  )
48
 
49
-
50
- # Import tool from Hub
51
- image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
52
-
53
- with open("prompts.yaml", 'r') as stream:
54
  prompt_templates = yaml.safe_load(stream)
55
-
56
  agent = CodeAgent(
57
  model=model,
58
- tools=[final_answer], ## add your tools here (don't remove final answer)
59
  max_steps=6,
60
  verbosity_level=1,
61
  grammar=None,
62
  planning_interval=None,
63
  name=None,
64
  description=None,
65
- prompt_templates=prompt_templates
 
66
  )
67
 
68
-
69
- GradioUI(agent).launch()
 
 
1
  import datetime
2
+
3
  import pytz
4
  import yaml
5
+ from smolagents import CodeAgent, HfApiModel, tool
6
 
7
  from Gradio_UI import GradioUI
8
+ from tools.final_answer import FinalAnswerTool
9
+ from tools.visit_webpage import VisitWebpageTool
10
+
11
 
 
12
  @tool
13
+ def get_news_from_place(place: str) -> str:
14
+ """
15
+ Fetches the top 5 news headlines from Google News for a specified place.
16
+
17
  Args:
18
+ place: The place to search news for (e.g., 'London').
 
19
  """
20
+ url = f"https://news.google.com/rss/search?q={place}&hl=en-US&gl=US&ceid=US:en"
21
+ visit_webpage_tool = VisitWebpageTool()
22
+ xml_content = visit_webpage_tool.forward(url=url)
23
+ # TODO: parse the xml content and return the top 5 headlines
24
+ return xml_content
25
+
26
 
27
  @tool
28
  def get_current_time_in_timezone(timezone: str) -> str:
 
31
  timezone: A string representing a valid timezone (e.g., 'America/New_York').
32
  """
33
  try:
 
34
  tz = pytz.timezone(timezone)
 
35
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
36
  return f"The current local time in {timezone} is: {local_time}"
37
  except Exception as e:
 
40
 
41
  final_answer = FinalAnswerTool()
42
 
 
 
 
43
  model = HfApiModel(
44
+ max_tokens=2096,
45
+ temperature=0.5,
46
+ model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
47
+ custom_role_conversions=None,
48
  )
49
 
50
+ with open("prompts.yaml", "r") as stream:
 
 
 
 
51
  prompt_templates = yaml.safe_load(stream)
52
+
53
  agent = CodeAgent(
54
  model=model,
55
+ tools=[final_answer, get_current_time_in_timezone, get_news_from_place],
56
  max_steps=6,
57
  verbosity_level=1,
58
  grammar=None,
59
  planning_interval=None,
60
  name=None,
61
  description=None,
62
+ prompt_templates=prompt_templates,
63
+ additional_authorized_imports=["pytz"],
64
  )
65
 
66
+ GradioUI(agent).launch()
 
prompts.yaml CHANGED
@@ -42,7 +42,7 @@
42
  Task:
43
  "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French.
44
  You have been provided with these additional arguments, that you can access using the keys as variables in your python code:
45
- {'question': 'Quel est l'animal sur l'image?', 'image': 'path/to/image.jpg'}"
46
 
47
  Thought: I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image.
48
  Code:
@@ -141,6 +141,58 @@
141
  final_answer(pope_current_age)
142
  ```<end_code>
143
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  Above examples were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you only have access to these tools:
145
  {%- for tool in tools.values() %}
146
  - {{ tool.name }}: {{ tool.description }}
 
42
  Task:
43
  "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French.
44
  You have been provided with these additional arguments, that you can access using the keys as variables in your python code:
45
+ {'question': 'Quel est l\'animal sur l\'image?', 'image': 'path/to/image.jpg'}"
46
 
47
  Thought: I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image.
48
  Code:
 
141
  final_answer(pope_current_age)
142
  ```<end_code>
143
 
144
+ ---
145
+ Task: "What is the current time and news in Tokyo?"
146
+
147
+ Thought: I will use the `get_current_time_in_timezone` and `get_news_from_place` tools to get the time and news for Tokyo.
148
+ Code:
149
+ ```py
150
+ time_in_tokyo = get_current_time_in_timezone(timezone="Asia/Tokyo")
151
+ print(time_in_tokyo)
152
+ news_in_tokyo = get_news_from_place(place="Tokyo")
153
+ print(news_in_tokyo)
154
+ ```<end_code>
155
+ Observation:
156
+ The current local time in Asia/Tokyo is: 2024-07-15 17:43:34
157
+ Top News Headlines for Tokyo:
158
+ - Headline 1
159
+ - Headline 2
160
+ - Headline 3
161
+ - Headline 4
162
+ - Headline 5
163
+
164
+ Thought: I have the current time and news headlines for Tokyo. I will now combine them and provide the final answer.
165
+ Code:
166
+ ```py
167
+ final_answer(f"{time_in_tokyo}\\n{news_in_tokyo}")
168
+ ```<end_code>
169
+
170
+ ---
171
+ Task: "Get me the news from London and tell me what time it is there."
172
+
173
+ Thought: I will use the `get_news_from_place` and `get_current_time_in_timezone` tools, using 'London' as the place and 'Europe/London' as the timezone.
174
+ Code:
175
+ ```python
176
+ news = get_news_from_place(place="London")
177
+ print(news)
178
+ time = get_current_time_in_timezone(timezone="Europe/London")
179
+ print(time)
180
+ ```<end_code>
181
+ Observation:
182
+ Top News Headlines for London:
183
+ - Headline 1
184
+ - Headline 2
185
+ - Headline 3
186
+ - Headline 4
187
+ - Headline 5
188
+ The current local time in Europe/London is: 2024-07-15 09:45:12
189
+
190
+ Thought: I will now provide the final answer.
191
+ Code:
192
+ ```python
193
+ final_answer(f"{time}\\n{news}")
194
+ ```<end_code>
195
+
196
  Above examples were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you only have access to these tools:
197
  {%- for tool in tools.values() %}
198
  - {{ tool.name }}: {{ tool.description }}
requirements.txt CHANGED
@@ -1,5 +1,5 @@
1
  markdownify
2
- smolagents
3
  requests
4
  duckduckgo_search
5
  pandas
 
1
  markdownify
2
+ smolagents[gradio]
3
  requests
4
  duckduckgo_search
5
  pandas
tools/final_answer.py CHANGED
@@ -1,10 +1,17 @@
1
- from typing import Any, Optional
 
2
  from smolagents.tools import Tool
3
 
 
4
  class FinalAnswerTool(Tool):
5
  name = "final_answer"
6
  description = "Provides a final answer to the given problem."
7
- inputs = {'answer': {'type': 'any', 'description': 'The final answer to the problem'}}
 
 
 
 
 
8
  output_type = "any"
9
 
10
  def forward(self, answer: Any) -> Any:
 
1
+ from typing import Any
2
+
3
  from smolagents.tools import Tool
4
 
5
+
6
  class FinalAnswerTool(Tool):
7
  name = "final_answer"
8
  description = "Provides a final answer to the given problem."
9
+ inputs = {
10
+ "answer": {
11
+ "type": "any",
12
+ "description": "The final answer to the problem",
13
+ }
14
+ }
15
  output_type = "any"
16
 
17
  def forward(self, answer: Any) -> Any:
tools/visit_webpage.py CHANGED
@@ -1,13 +1,17 @@
1
- from typing import Any, Optional
 
2
  from smolagents.tools import Tool
3
- import requests
4
- import markdownify
5
- import smolagents
6
 
7
  class VisitWebpageTool(Tool):
8
  name = "visit_webpage"
9
  description = "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages."
10
- inputs = {'url': {'type': 'string', 'description': 'The url of the webpage to visit.'}}
 
 
 
 
 
11
  output_type = "string"
12
 
13
  def forward(self, url: str) -> str:
@@ -15,7 +19,6 @@ class VisitWebpageTool(Tool):
15
  import requests
16
  from markdownify import markdownify
17
  from requests.exceptions import RequestException
18
-
19
  from smolagents.utils import truncate_content
20
  except ImportError as e:
21
  raise ImportError(
 
1
+ import re
2
+
3
  from smolagents.tools import Tool
4
+
 
 
5
 
6
  class VisitWebpageTool(Tool):
7
  name = "visit_webpage"
8
  description = "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages."
9
+ inputs = {
10
+ "url": {
11
+ "type": "string",
12
+ "description": "The url of the webpage to visit.",
13
+ }
14
+ }
15
  output_type = "string"
16
 
17
  def forward(self, url: str) -> str:
 
19
  import requests
20
  from markdownify import markdownify
21
  from requests.exceptions import RequestException
 
22
  from smolagents.utils import truncate_content
23
  except ImportError as e:
24
  raise ImportError(
tools/web_search.py CHANGED
@@ -1,11 +1,15 @@
1
- from typing import Any, Optional
2
  from smolagents.tools import Tool
3
- import duckduckgo_search
4
 
5
  class DuckDuckGoSearchTool(Tool):
6
  name = "web_search"
7
  description = "Performs a duckduckgo web search based on your query (think a Google search) then returns the top search results."
8
- inputs = {'query': {'type': 'string', 'description': 'The search query to perform.'}}
 
 
 
 
 
9
  output_type = "string"
10
 
11
  def __init__(self, max_results=10, **kwargs):
@@ -23,5 +27,8 @@ class DuckDuckGoSearchTool(Tool):
23
  results = self.ddgs.text(query, max_results=self.max_results)
24
  if len(results) == 0:
25
  raise Exception("No results found! Try a less restrictive/shorter query.")
26
- postprocessed_results = [f"[{result['title']}]({result['href']})\n{result['body']}" for result in results]
 
 
 
27
  return "## Search Results\n\n" + "\n\n".join(postprocessed_results)
 
 
1
  from smolagents.tools import Tool
2
+
3
 
4
  class DuckDuckGoSearchTool(Tool):
5
  name = "web_search"
6
  description = "Performs a duckduckgo web search based on your query (think a Google search) then returns the top search results."
7
+ inputs = {
8
+ "query": {
9
+ "type": "string",
10
+ "description": "The search query to perform.",
11
+ }
12
+ }
13
  output_type = "string"
14
 
15
  def __init__(self, max_results=10, **kwargs):
 
27
  results = self.ddgs.text(query, max_results=self.max_results)
28
  if len(results) == 0:
29
  raise Exception("No results found! Try a less restrictive/shorter query.")
30
+ postprocessed_results = [
31
+ f"[{result['title']}]({result['href']})\n{result['body']}"
32
+ for result in results
33
+ ]
34
  return "## Search Results\n\n" + "\n\n".join(postprocessed_results)