Spaces:
Running
Running
Update agent.py
Browse files
agent.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
# agent.py
|
2 |
import os
|
3 |
from dotenv import load_dotenv
|
4 |
-
from typing import TypedDict, Annotated, Sequence, Dict, Any, List
|
5 |
from langchain_core.messages import BaseMessage, HumanMessage
|
6 |
from langchain_core.tools import tool
|
7 |
from langchain_openai import ChatOpenAI
|
@@ -12,7 +12,6 @@ from langchain_community.utilities import WikipediaAPIWrapper
|
|
12 |
from langchain.agents import create_tool_calling_agent, AgentExecutor
|
13 |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
14 |
import operator
|
15 |
-
import operator
|
16 |
|
17 |
load_dotenv()
|
18 |
|
@@ -37,13 +36,37 @@ def calculate(expression: str) -> str:
|
|
37 |
python_repl = PythonREPL()
|
38 |
return python_repl.run(expression)
|
39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
class AIAgent:
|
41 |
def __init__(self, model_name: str = "gpt-3.5-turbo"):
|
42 |
self.tools = [wikipedia_search, web_search, calculate]
|
43 |
self.llm = ChatOpenAI(model=model_name, temperature=0.7)
|
44 |
self.agent_executor = self._build_agent_executor()
|
45 |
-
self.workflow = self.
|
46 |
-
|
47 |
def _build_agent_executor(self) -> AgentExecutor:
|
48 |
"""Build the agent executor"""
|
49 |
prompt = ChatPromptTemplate.from_messages([
|
|
|
1 |
# agent.py
|
2 |
import os
|
3 |
from dotenv import load_dotenv
|
4 |
+
from typing import TypedDict, Annotated, Sequence, Dict, Any, List
|
5 |
from langchain_core.messages import BaseMessage, HumanMessage
|
6 |
from langchain_core.tools import tool
|
7 |
from langchain_openai import ChatOpenAI
|
|
|
12 |
from langchain.agents import create_tool_calling_agent, AgentExecutor
|
13 |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
14 |
import operator
|
|
|
15 |
|
16 |
load_dotenv()
|
17 |
|
|
|
36 |
python_repl = PythonREPL()
|
37 |
return python_repl.run(expression)
|
38 |
|
def build_graph(tools: list, agent_executor: AgentExecutor) -> StateGraph:
    """Build and return the compiled LangGraph workflow.

    The graph alternates between an ``agent`` node (runs the LLM agent via
    *agent_executor*) and a ``tools`` node, looping until the agent's last
    message carries no tool calls.

    Args:
        tools: Tool callables exposed through the graph's ``ToolNode``.
        agent_executor: Executor invoked with ``{"messages": ...}`` at each
            agent step.

    Returns:
        The compiled workflow (``workflow.compile()``; the annotation keeps
        the original ``StateGraph`` signature for compatibility).
    """
    workflow = StateGraph(AgentState)

    def run_agent(state: AgentState) -> Dict[str, Any]:
        """Run one agent step over the accumulated conversation."""
        response = agent_executor.invoke({"messages": state["messages"]})
        return {"messages": [response["output"]]}

    def should_continue(state: AgentState) -> str:
        """Route to the tool node only when the last message requests tools.

        NOTE: ``run_agent`` appends ``response["output"]``, which for an
        ``AgentExecutor`` is a plain string with no ``additional_kwargs``
        attribute — the original attribute access raised ``AttributeError``
        on the first routing check. ``getattr`` with a ``{}`` fallback keeps
        the check safe for both message objects and raw strings.
        """
        last_message = state["messages"][-1]
        tool_calls = getattr(last_message, "additional_kwargs", {}).get("tool_calls")
        return "continue" if tool_calls else "end"

    workflow.add_node("agent", run_agent)
    workflow.add_node("tools", ToolNode(tools))
    workflow.set_entry_point("agent")
    workflow.add_conditional_edges(
        "agent",
        should_continue,
        {"continue": "tools", "end": END}
    )
    workflow.add_edge("tools", "agent")

    return workflow.compile()
63 |
class AIAgent:
|
def __init__(self, model_name: str = "gpt-3.5-turbo"):
    """Initialize the agent's tools, LLM, executor, and workflow graph.

    Args:
        model_name: OpenAI chat model identifier; defaults to
            ``"gpt-3.5-turbo"``.
    """
    # Tool set registered with both the executor and the graph's tool node.
    available_tools = [wikipedia_search, web_search, calculate]
    self.tools = available_tools
    self.llm = ChatOpenAI(model=model_name, temperature=0.7)
    # Executor must exist before the graph is built, since the graph's
    # agent node delegates to it.
    self.agent_executor = self._build_agent_executor()
    self.workflow = build_graph(self.tools, self.agent_executor)  # Using the standalone function
|
70 |
def _build_agent_executor(self) -> AgentExecutor:
|
71 |
"""Build the agent executor"""
|
72 |
prompt = ChatPromptTemplate.from_messages([
|