Spaces:
Sleeping
Sleeping
Test MCP ToolCollection
Browse files
app.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
-
from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
|
|
|
2 |
import datetime
|
3 |
import requests
|
4 |
import pytz
|
@@ -34,6 +35,12 @@ def get_current_time_in_timezone(timezone: str) -> str:
|
|
34 |
return f"Error fetching time for timezone '{timezone}': {str(e)}"
|
35 |
|
36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
final_answer = FinalAnswerTool()
|
38 |
|
39 |
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
@@ -53,17 +60,32 @@ image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_co
|
|
53 |
with open("prompts.yaml", 'r') as stream:
|
54 |
prompt_templates = yaml.safe_load(stream)
|
55 |
|
56 |
-
agent = CodeAgent(
|
57 |
-
model=model,
|
58 |
-
tools=[final_answer, image_generation_tool], ## add your tools here (don't remove final answer)
|
59 |
-
max_steps=6,
|
60 |
-
verbosity_level=1,
|
61 |
-
grammar=None,
|
62 |
-
planning_interval=None,
|
63 |
-
name=None,
|
64 |
-
description=None,
|
65 |
-
prompt_templates=prompt_templates
|
66 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
|
68 |
|
69 |
GradioUI(agent).launch()
|
|
|
1 |
+
from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool, ToolCollection
|
2 |
+
from mcp import StdioServerParameters
import os
|
3 |
import datetime
|
4 |
import requests
|
5 |
import pytz
|
|
|
35 |
return f"Error fetching time for timezone '{timezone}': {str(e)}"
|
36 |
|
37 |
|
38 |
+
server_parameters = StdioServerParameters(
|
39 |
+
command="uv",
|
40 |
+
args=["--quiet", "[email protected]"],
|
41 |
+
env={"UV_PYTHON": "3.12", **os.environ},
|
42 |
+
)
|
43 |
+
|
44 |
final_answer = FinalAnswerTool()
|
45 |
|
46 |
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
|
|
60 |
with open("prompts.yaml", 'r') as stream:
|
61 |
prompt_templates = yaml.safe_load(stream)
|
62 |
|
63 |
+
#agent = CodeAgent(
|
64 |
+
# model=model,
|
65 |
+
# tools=[final_answer, image_generation_tool], ## add your tools here (don't remove final answer)
|
66 |
+
# max_steps=6,
|
67 |
+
# verbosity_level=1,
|
68 |
+
# grammar=None,
|
69 |
+
# planning_interval=None,
|
70 |
+
# name=None,
|
71 |
+
# description=None,
|
72 |
+
# prompt_templates=prompt_templates
|
73 |
+
#)
|
74 |
+
|
75 |
+
with ToolCollection.from_mcp(server_parameters) as tool_collection:
|
76 |
+
agent = CodeAgent(
|
77 |
+
model=model,
|
78 |
+
tools=[final_answer, *tool_collection.tools],
|
79 |
+
add_base_tools=True,
|
80 |
+
max_steps=6,
|
81 |
+
verbosity_level=1,
|
82 |
+
grammar=None,
|
83 |
+
planning_interval=None,
|
84 |
+
name=None,
|
85 |
+
description=None,
|
86 |
+
prompt_templates=prompt_templates
|
87 |
+
)
|
88 |
+
#agent.run("Please find a remedy for hangover.")
|
89 |
|
90 |
|
91 |
GradioUI(agent).launch()
|