Update agent.py
agent.py CHANGED
@@ -1,7 +1,7 @@
 # agent.py
 import os
 from dotenv import load_dotenv
-from typing import TypedDict, Annotated, Sequence, Dict, Any
+from typing import TypedDict, Annotated, Sequence, Dict, Any, List  # Added List here
 from langchain_core.messages import BaseMessage, HumanMessage
 from langchain_core.tools import tool
 from langchain_openai import ChatOpenAI
@@ -12,6 +12,7 @@ from langchain_community.utilities import WikipediaAPIWrapper
 from langchain.agents import create_tool_calling_agent, AgentExecutor
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
 import operator
+import operator

 load_dotenv()

@@ -41,7 +42,7 @@ class AIAgent:
         self.tools = [wikipedia_search, web_search, calculate]
         self.llm = ChatOpenAI(model=model_name, temperature=0.7)
         self.agent_executor = self._build_agent_executor()
-        self.workflow = self._build_workflow()
+        self.workflow = self._build_workflow()

     def _build_agent_executor(self) -> AgentExecutor:
         """Build the agent executor"""
@@ -57,11 +58,9 @@ class AIAgent:
         """Build and return the compiled workflow"""
         workflow = StateGraph(AgentState)

-        # Define nodes
         workflow.add_node("agent", self._run_agent)
         workflow.add_node("tools", ToolNode(self.tools))

-        # Define edges
         workflow.set_entry_point("agent")
         workflow.add_conditional_edges(
             "agent",
@@ -113,4 +112,17 @@ class AIAgent:
             if hasattr(msg, 'additional_kwargs') and 'tool_calls' in msg.additional_kwargs:
                 for call in msg.additional_kwargs['tool_calls']:
                     steps.append(f"Used {call['function']['name']}: {call['function']['arguments']}")
-        return steps
+        return steps
+
+if __name__ == "__main__":
+    agent = AIAgent()
+    response = agent("What's the capital of France?")
+    print("Response:", response["response"])
+    if response["sources"]:
+        print("\nSources:")
+        for source in response["sources"]:
+            print("-", source)
+    if response["steps"]:
+        print("\nSteps:")
+        for step in response["steps"]:
+            print("-", step)