# Provenance: wt002's "Update agent.py" (commit 836d49c).
# The hosting site's web-UI header lines ("picture", "raw", "history blame",
# file size) were commented out so this file parses as valid Python.
# agent.py
import ast
import operator
import os
from typing import TypedDict, Annotated, Sequence, Dict, Any, List

from dotenv import load_dotenv
from langchain.agents import create_tool_calling_agent, AgentExecutor
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool
from langchain_experimental.utilities import PythonREPL
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolNode
# Load environment variables (e.g. the OpenAI API key ChatOpenAI reads) from
# a local .env file before any model client is constructed.
load_dotenv()
class AgentState(TypedDict):
    """State dictionary threaded through the LangGraph workflow."""

    # Conversation history; the operator.add annotation makes LangGraph
    # append each node's message updates instead of replacing the list.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # Set to "user" when the workflow starts (see BasicAgent.__call__);
    # not updated anywhere else in this file.
    sender: str
@tool
def wikipedia_search(query: str) -> str:
    """Look up *query* on Wikipedia and return the wrapper's text result."""
    wrapper = WikipediaAPIWrapper()
    return wrapper.run(query)
@tool
def web_search(query: str, num_results: int = 3) -> str:
    """Search the web via DuckDuckGo for current information.

    Args:
        query: Search terms to submit.
        num_results: Maximum number of results to request (default 3).

    Returns:
        A single formatted string of result snippets.
    """
    # FIX: DuckDuckGoSearchResults.run() returns a formatted string by
    # default, so the return annotation is str (the original said list).
    return DuckDuckGoSearchResults(num_results=num_results).run(query)
@tool
def calculate(expression: str) -> str:
    """Evaluate a mathematical expression and return its result as text.

    FIX: PythonREPL.run() exec()s the code and returns captured stdout, so a
    bare expression such as "2+2" produced an empty string. When the input
    parses as a single expression it is wrapped in print() so the value is
    actually emitted; anything else (e.g. input that already calls print)
    runs unchanged.
    """
    try:
        ast.parse(expression, mode="eval")
        code = f"print({expression})"
    except SyntaxError:
        # Not a pure expression — execute as-is rather than breaking it.
        code = expression
    # SECURITY: PythonREPL executes arbitrary Python with no sandboxing;
    # only trusted (LLM-generated) input should ever reach this tool.
    python_repl = PythonREPL()
    return python_repl.run(code)
class BasicAgent:
    """A LangGraph agent that answers questions using search and calculator tools."""

    def __init__(self, model_name: str = "gpt-3.5-turbo"):
        """Build the tool set, LLM client, executor, and compiled workflow.

        Args:
            model_name: OpenAI chat model name passed to ChatOpenAI.
        """
        self.tools = [wikipedia_search, web_search, calculate]
        self.llm = ChatOpenAI(model=model_name, temperature=0.7)
        self.agent_executor = self._build_agent_executor()
        self.workflow = self._build_workflow()  # Initialize workflow here

    def _build_agent_executor(self) -> AgentExecutor:
        """Build a tool-calling AgentExecutor around the LLM and tools."""
        prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a helpful AI assistant. Use tools when needed."),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])
        agent = create_tool_calling_agent(self.llm, self.tools, prompt)
        return AgentExecutor(agent=agent, tools=self.tools, verbose=True)

    def _build_workflow(self):
        """Assemble and compile the agent/tools graph.

        Returns the *compiled* graph (the original annotated StateGraph,
        which is not what workflow.compile() returns), ready for .stream().
        """
        workflow = StateGraph(AgentState)
        workflow.add_node("agent", self._run_agent)
        workflow.add_node("tools", ToolNode(self.tools))
        workflow.set_entry_point("agent")
        workflow.add_conditional_edges(
            "agent",
            self._should_continue,
            {"continue": "tools", "end": END},
        )
        workflow.add_edge("tools", "agent")
        return workflow.compile()

    def _run_agent(self, state: AgentState) -> Dict[str, Any]:
        """Run the executor on the conversation and append its reply.

        BUGFIX: response["output"] is a plain string; the original stored it
        directly in messages, so _should_continue crashed with
        AttributeError on str.additional_kwargs. Wrap it in an AIMessage.
        """
        response = self.agent_executor.invoke({"messages": state["messages"]})
        return {"messages": [AIMessage(content=response["output"])]}

    def _should_continue(self, state: AgentState) -> str:
        """Route to "tools" if the last message requested tool calls, else end."""
        last_message = state["messages"][-1]
        return "continue" if last_message.additional_kwargs.get("tool_calls") else "end"

    def __call__(self, question: str) -> str:
        """Process a user question and return the agent's final answer.

        BUGFIX: workflow.stream() yields {node_name: state_update} dicts, so
        the original `if key == "messages"` test never matched and every call
        returned the fallback string. Each node update is now unwrapped and
        the most recent non-empty message content is returned.
        """
        state = AgentState(messages=[HumanMessage(content=question)], sender="user")
        answer = None
        for output in self.workflow.stream(state):
            for node_update in output.values():
                if not isinstance(node_update, dict):
                    continue
                for message in node_update.get("messages", []):
                    if isinstance(message, BaseMessage) and message.content:
                        answer = message.content  # keep the latest reply
        return answer if answer is not None else "Sorry, I couldn't generate a response."
# Example usage: run this module directly for a one-shot demo question.
if __name__ == "__main__":
    demo_agent = BasicAgent()
    print(demo_agent("What's the capital of France?"))