# agent.py
from dotenv import load_dotenv
from typing import TypedDict, Annotated, Sequence, Dict, Any
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolNode
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_community.utilities import WikipediaAPIWrapper
from langchain.agents import create_tool_calling_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
import operator
from langchain_experimental.utilities import PythonREPL

load_dotenv()


class AgentState(TypedDict):
    # `operator.add` acts as a reducer: messages returned by each node are
    # appended to the existing history instead of replacing it.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    sender: str

@tool
def wikipedia_search(query: str) -> str:
    """Search Wikipedia for information."""
    return WikipediaAPIWrapper().run(query)

@tool
def web_search(query: str, num_results: int = 3) -> str:
    """Search the web for current information via DuckDuckGo."""
    return DuckDuckGoSearchResults(num_results=num_results).run(query)

@tool
def calculate(expression: str) -> str:
    """Evaluate a mathematical expression."""
    python_repl = PythonREPL()
    # PythonREPL captures stdout, so print() the result to return it as a string.
    return python_repl.run(f"print({expression})")

class BasicAgent:
    """A LangGraph agent that delegates reasoning to a tool-calling AgentExecutor."""
    
    def __init__(self, model_name: str = "gpt-3.5-turbo"):
        self.tools = [wikipedia_search, web_search, calculate]
        self.llm = ChatOpenAI(model=model_name, temperature=0.7)
        self.agent_executor = self._build_agent_executor()
        self.workflow = self._build_workflow()  # Compiled LangGraph workflow
    
    def _build_agent_executor(self) -> AgentExecutor:
        """Build the agent executor with tools."""
        prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a helpful AI assistant. Use tools when needed."),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])
        agent = create_tool_calling_agent(self.llm, self.tools, prompt)
        return AgentExecutor(agent=agent, tools=self.tools, verbose=True)
    
    def _build_workflow(self) -> StateGraph:
        """Build and compile the agent workflow."""
        workflow = StateGraph(AgentState)
        
        workflow.add_node("agent", self._run_agent)
        workflow.add_node("tools", ToolNode(self.tools))
        
        workflow.set_entry_point("agent")
        workflow.add_conditional_edges(
            "agent",
            self._should_continue,
            {"continue": "tools", "end": END}
        )
        workflow.add_edge("tools", "agent")
        
        return workflow.compile()
    
    def _run_agent(self, state: AgentState) -> Dict[str, Any]:
        """Execute the agent executor on the current message history."""
        response = self.agent_executor.invoke({"messages": state["messages"]})
        # Wrap the raw string output in an AIMessage so downstream nodes
        # always see BaseMessage instances in the state.
        return {"messages": [AIMessage(content=response["output"])]}
    
    def _should_continue(self, state: AgentState) -> str:
        """Determine if the workflow should continue."""
        last_message = state["messages"][-1]
        return "continue" if last_message.additional_kwargs.get("tool_calls") else "end"
    
    def __call__(self, question: str) -> str:
        """Process a user question and return a response."""
        # Initialize state with the user's question
        state = AgentState(messages=[HumanMessage(content=question)], sender="user")
        
        # Execute the workflow; stream() yields {node_name: state_update} dicts,
        # so inspect each node's update and keep the latest message.
        final_message = None
        for output in self.workflow.stream(state):
            for node_update in output.values():
                for message in node_update.get("messages", []):
                    if isinstance(message, BaseMessage):
                        final_message = message
        
        if final_message is not None:
            return final_message.content
        return "Sorry, I couldn't generate a response."

# Example usage
if __name__ == "__main__":
    agent = BasicAgent()
    response = agent("What's the capital of France?")
    print(response)
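    # Additional example (assumes OPENAI_API_KEY is set in the environment or a
    # local .env file): a question that should route through the `calculate` tool.
    print(agent("What is 17 * 23?"))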