upload 2 files
agent.py +179 -0
system_prompt.txt +74 -0
agent.py
ADDED
@@ -0,0 +1,179 @@
import json
import operator
from typing import TypedDict, Annotated, Sequence

from dotenv import load_dotenv
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph
from langgraph.prebuilt import ToolExecutor, ToolInvocation
from langchain.tools import DuckDuckGoSearchResults
from langchain_community.utilities import WikipediaAPIWrapper
from langchain.agents import create_tool_calling_agent, AgentExecutor
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

load_dotenv()

# Define the agent state
class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], operator.add]
    sender: str

# Initialize tools
@tool
def wikipedia_search(query: str) -> str:
    """Search Wikipedia for information."""
    return WikipediaAPIWrapper().run(query)

@tool
def web_search(query: str, num_results: int = 3) -> str:
    """Search the web for current information."""
    return DuckDuckGoSearchResults(num_results=num_results).run(query)

@tool
def calculate(expression: str) -> str:
    """Evaluate mathematical expressions."""
    from langchain.chains import LLMMathChain
    llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
    return LLMMathChain.from_llm(llm).run(expression)

class AdvancedAIAgent:
    def __init__(self, model_name="gpt-4-turbo"):
        # Initialize tools and LLM
        self.tools = [wikipedia_search, web_search, calculate]
        self.llm = ChatOpenAI(model=model_name, temperature=0.7)

        # Create the agent
        self.agent = self._create_agent()
        self.tool_executor = ToolExecutor(self.tools)

        # Build the graph workflow
        self.workflow = self._build_graph()

    def _create_agent(self) -> AgentExecutor:
        """Create the agent with tools and prompt"""
        prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a helpful AI assistant. Use tools when needed."),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])

        agent = create_tool_calling_agent(self.llm, self.tools, prompt)
        return AgentExecutor(agent=agent, tools=self.tools, verbose=True)

    def _build_graph(self):
        """Build the LangGraph workflow"""
        workflow = StateGraph(AgentState)

        # Define nodes
        workflow.add_node("agent", self._call_agent)
        workflow.add_node("tools", self._call_tools)

        # Define edges
        workflow.set_entry_point("agent")
        workflow.add_conditional_edges(
            "agent",
            self._should_continue,
            {
                "continue": "tools",
                "end": END
            }
        )
        workflow.add_edge("tools", "agent")

        return workflow.compile()

    def _call_agent(self, state: AgentState):
        """Execute the agent"""
        response = self.agent.invoke({"messages": state["messages"]})
        # Wrap the string output in an AIMessage so downstream nodes can
        # inspect additional_kwargs instead of failing on a plain string
        return {"messages": [AIMessage(content=response["output"])]}

    def _call_tools(self, state: AgentState):
        """Execute tools requested by the last agent message"""
        last_message = state["messages"][-1]

        # Find the tool calls
        tool_calls = last_message.additional_kwargs.get("tool_calls", [])

        # Execute each tool and wrap the result in a ToolMessage
        tool_messages = []
        for tool_call in tool_calls:
            action = ToolInvocation(
                tool=tool_call["function"]["name"],
                tool_input=json.loads(tool_call["function"]["arguments"]),
            )
            output = self.tool_executor.invoke(action)

            # Create tool message
            tool_messages.append(ToolMessage(
                content=str(output),
                name=action.tool,
                tool_call_id=tool_call["id"],
            ))

        # Return only the new messages; operator.add appends them to the state
        return {"messages": tool_messages}

    def _should_continue(self, state: AgentState):
        """Determine if the workflow should continue"""
        last_message = state["messages"][-1]

        # If no tool calls, end
        if not last_message.additional_kwargs.get("tool_calls"):
            return "end"
        return "continue"

    def __call__(self, query: str) -> dict:
        """Process a user query"""
        # Initialize state
        state = AgentState(messages=[HumanMessage(content=query)], sender="user")

        # Execute the workflow; stream() yields {node_name: state_update} dicts
        collected = list(state["messages"])
        for output in self.workflow.stream(state):
            for node_name, update in output.items():
                for message in update.get("messages", []):
                    if isinstance(message, BaseMessage):
                        collected.append(message)

        last_message = collected[-1]
        return {
            "response": last_message.content,
            "sources": self._extract_sources(collected),
            "steps": self._extract_steps(collected)
        }

    def _extract_sources(self, messages: Sequence[BaseMessage]) -> list:
        """Extract sources from tool messages"""
        return [
            f"{msg.name or 'unknown'}: {msg.content}"
            for msg in messages
            if isinstance(msg, ToolMessage)
        ]

    def _extract_steps(self, messages: Sequence[BaseMessage]) -> list:
        """Extract reasoning steps"""
        steps = []
        for msg in messages:
            if hasattr(msg, 'additional_kwargs') and 'tool_calls' in msg.additional_kwargs:
                for call in msg.additional_kwargs['tool_calls']:
                    steps.append(f"Used {call['function']['name']}: {call['function']['arguments']}")
        return steps

# Example usage
if __name__ == "__main__":
    agent = AdvancedAIAgent()

    queries = [
        "What is the capital of France?",
        "Calculate 15% of 200",
        "Tell me about the latest developments in quantum computing"
    ]

    for query in queries:
        print(f"\nQuestion: {query}")
        response = agent(query)
        print(f"Answer: {response['response']}")
        if response['sources']:
            print("Sources:")
            for source in response['sources']:
                print(f"- {source}")
        if response['steps']:
            print("Steps taken:")
            for step in response['steps']:
                print(f"- {step}")
system_prompt.txt
ADDED
@@ -0,0 +1,74 @@
# AI Agent System Prompt

## Role
You are an advanced AI assistant named Nexus designed to:
- Provide accurate, well-researched information
- Perform complex calculations and data analysis
- Maintain context across multi-turn conversations
- Use tools strategically when needed
- Explain your reasoning transparently

## Core Principles
1. **Accuracy First**: Always verify information using tools when uncertain
2. **Transparency**: Clearly indicate when using tools and cite sources
3. **Efficiency**: Balance thoroughness with response speed
4. **Context Awareness**: Maintain conversation history and user preferences
5. **Safety**: Avoid harmful, unethical, or dangerous suggestions

## Tool Usage Guidelines
### Available Tools:
1. `wikipedia_search`: For factual, historical, or scientific information
   - Use when: You need reliable, encyclopedic knowledge
   - Example: "What is the history of the Roman Empire?"

2. `web_search`: For current events, news, or real-time information
   - Use when: Information may have changed recently
   - Example: "What are today's top tech news stories?"

3. `calculate`: For mathematical operations and data analysis
   - Use when: Precise calculations are required
   - Example: "What is 15% of $245 plus 7.5% sales tax?"

### Tool Selection Rules:
- Prefer Wikipedia for established facts
- Use web search for time-sensitive queries
- Always use the calculator for math operations
- Never guess when exact information is required

## Response Formatting
Structure responses clearly with:
1. Direct answer first
2. Supporting details/sources
3. Any relevant context or caveats

For complex questions:
- Break down into logical steps
- Show intermediate results
- Summarize the final answer

## Special Cases
- If information is contradictory: Present both sides with sources
- If the query is ambiguous: Ask clarifying questions
- If the request is unethical: Politely decline with an explanation

## Personalization
- Remember user preferences from conversation history
- Adapt tone based on interaction style (technical/casual)
- Maintain an appropriate level of detail

## Example Interactions
[User] What's the capital of France?
[AI] The capital of France is Paris. (Source: Wikipedia)

[User] Calculate the compound interest on $10,000 at 5% APR for 3 years
[AI] Calculating:
- Year 1: $10,000 * 1.05 = $10,500
- Year 2: $10,500 * 1.05 = $11,025
- Year 3: $11,025 * 1.05 = $11,576.25
Final amount after 3 years: $11,576.25

[User] What's happening with AI regulation?
[AI] Checking current sources... (performing web search)
According to recent reports:
1. EU passed the AI Act in March 2024 (Source: TechCrunch)
2. US released new AI guidelines last month (Source: Reuters)
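
Note: _create_agent() in agent.py hardcodes a generic system message, while this commit also adds system_prompt.txt. A minimal sketch of loading the file into the prompt template instead is shown below; it is an illustration rather than part of the uploaded files, and it assumes system_prompt.txt sits in the same directory as agent.py and contains no curly braces (which ChatPromptTemplate would treat as template variables).

# Hypothetical wiring, not included in this commit
from pathlib import Path

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

# Assumption: system_prompt.txt lives next to agent.py
SYSTEM_PROMPT = (Path(__file__).parent / "system_prompt.txt").read_text(encoding="utf-8")

prompt = ChatPromptTemplate.from_messages([
    ("system", SYSTEM_PROMPT),
    MessagesPlaceholder(variable_name="messages"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
])

Passing this prompt to create_tool_calling_agent in place of the hardcoded one would let the Nexus instructions above drive the agent's behavior.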
|