Commit 72298ff
Parent(s): 78f636a
new prompt

Files changed:
- agent.py +25 -38
- system_prompt.txt +6 -11
agent.py
CHANGED
@@ -18,67 +18,59 @@ from supabase.client import Client, create_client
 
 load_dotenv()
 
+# === Tools ===
+
 @tool
 def multiply(a: int, b: int) -> int:
-    """Multiply two integers
+    """Multiply two integers."""
     return a * b
 
 @tool
 def add(a: int, b: int) -> int:
-    """Add two integers
+    """Add two integers."""
     return a + b
 
 @tool
 def subtract(a: int, b: int) -> int:
-    """Subtract
+    """Subtract b from a."""
     return a - b
 
 @tool
 def divide(a: int, b: int) -> float:
-    """Divide
+    """Divide a by b."""
     if b == 0:
         raise ValueError("Cannot divide by zero.")
     return a / b
 
 @tool
 def modulus(a: int, b: int) -> int:
-    """Return
+    """Return a modulo b."""
     return a % b
 
 @tool
 def wiki_search(query: str) -> str:
-    """Search Wikipedia for a query
+    """Search Wikipedia for a query."""
     search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
-
-        [f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>' for doc in search_docs]
-    )
-    return {"wiki_results": formatted}
+    return "\n\n---\n\n".join([doc.page_content for doc in search_docs])
 
 @tool
 def web_search(query: str) -> str:
-    """Search
+    """Search the web for a query."""
     search_docs = TavilySearchResults(max_results=3).invoke(query=query)
-
-        [f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>' for doc in search_docs]
-    )
-    return {"web_results": formatted}
+    return "\n\n---\n\n".join([doc.page_content for doc in search_docs])
 
 @tool
 def arvix_search(query: str) -> str:
-    """Search Arxiv for a query
+    """Search Arxiv for a query."""
     search_docs = ArxivLoader(query=query, load_max_docs=3).load()
-
-        [f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>' for doc in search_docs]
-    )
-    return {"arvix_results": formatted}
+    return "\n\n---\n\n".join([doc.page_content[:1000] for doc in search_docs])
 
-#
+# === System Prompt ===
 with open("system_prompt.txt", "r", encoding="utf-8") as f:
     system_prompt = f.read()
-
 sys_msg = SystemMessage(content=system_prompt)
 
-#
+# === Embeddings and Vector Store ===
 embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
 supabase: Client = create_client(os.environ.get("SUPABASE_URL"), os.environ.get("SUPABASE_SERVICE_KEY"))
 vector_store = SupabaseVectorStore(
@@ -88,19 +80,10 @@ vector_store = SupabaseVectorStore(
     query_name="match_documents_langchain",
 )
 
-
-
-    name="Question Search",
-    description="A tool to retrieve similar questions from a vector store."
-)
-
-# Define tool list
-tools = [
-    multiply, add, subtract, divide, modulus,
-    wiki_search, web_search, arvix_search
-]
+# === Tools ===
+tools = [multiply, add, subtract, divide, modulus, wiki_search, web_search, arvix_search]
 
-# Build
+# === Build Graph ===
 def build_graph(provider: str = "groq"):
     if provider == "google":
         llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
@@ -119,11 +102,15 @@ def build_graph(provider: str = "groq"):
     llm_with_tools = llm.bind_tools(tools)
 
     def assistant(state: MessagesState):
-
+        response = llm_with_tools.invoke(state["messages"])
+        content = response.content.strip()
+        if "FINAL ANSWER:" in content:
+            content = content.split("FINAL ANSWER:")[-1].strip()
+        return {"messages": [AIMessage(content=content)]}
 
     def retriever(state: MessagesState):
-
-        example_msg = HumanMessage(content=f"
+        similar_question = vector_store.similarity_search(state["messages"][0].content)
+        example_msg = HumanMessage(content=f"Reference: {similar_question[0].page_content}")
         return {"messages": [sys_msg] + state["messages"] + [example_msg]}
 
     builder = StateGraph(MessagesState)
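For context, a minimal usage sketch of the updated build_graph() follows. It assumes the function returns a compiled LangGraph graph over MessagesState and that the module path and "groq" provider credentials are configured outside this diff, so treat the names as illustrative rather than definitive:

# Hypothetical usage sketch; the graph wiring and compiled-graph return of
# build_graph() are assumed, not shown in the hunks above.
from langchain_core.messages import HumanMessage
from agent import build_graph  # assumed module name, matching the file agent.py

graph = build_graph(provider="groq")
result = graph.invoke({"messages": [HumanMessage(content="What is 17 * 3?")]})
print(result["messages"][-1].content)  # with the new prompt, ideally just the answer, e.g. "51"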
system_prompt.txt
CHANGED
@@ -1,11 +1,6 @@
-You are a helpful assistant
-
-
-
-FINAL ANSWER:
-
-- Your final answer should be a **number** OR as few words as possible OR a comma-separated list.
-- Do not include explanations, markdown, or any additional text after FINAL ANSWER.
-- If the answer is a string, do not include articles (e.g., "the", "a") or abbreviations. Write digits in full words if requested.
-
-Any answer that does not follow the `FINAL ANSWER: ...` format exactly will be considered incorrect.
+You are a helpful assistant. Think step-by-step to solve the question. Then output only the final answer as your last message. The final answer must be:
+- a number (without comma separators or symbols),
+- a string (no articles or abbreviations),
+- or a comma-separated list of such elements.
+Do not include any explanation or prefix like "Answer:", "FINAL ANSWER:", or similar.
+Return only the answer.
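The new prompt tells the model to return only the answer without a "FINAL ANSWER:" prefix, while the rewritten assistant node in agent.py still strips that prefix defensively if it appears. A standalone sketch of that stripping logic, using invented example outputs:

# Standalone illustration of the stripping kept in assistant(); the example
# model outputs below are made up for demonstration.
def strip_final_answer(content: str) -> str:
    content = content.strip()
    if "FINAL ANSWER:" in content:
        content = content.split("FINAL ANSWER:")[-1].strip()
    return content

print(strip_final_answer("FINAL ANSWER: 42"))  # -> 42 (old-style output still handled)
print(strip_final_answer("Paris"))             # -> Paris (new-style output passes through)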