"""LangGraph: agent graph w/ tools"""

import cmath
import os

from dotenv import load_dotenv

from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition

from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool

# Load environment variables (API keys for Google, Hugging Face, Tavily) from a local .env file.
load_dotenv()


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers.

    Args:
        a: first int
        b: second int
    """
    return a * b


@tool
def add(a: int, b: int) -> int:
    """Add two numbers.

    Args:
        a: first int
        b: second int
    """
    return a + b


@tool
def subtract(a: int, b: int) -> int:
    """Subtract two numbers.

    Args:
        a: first int
        b: second int
    """
    return a - b


@tool
def divide(a: int, b: int) -> float:
    """Divide two numbers.

    Args:
        a: first int
        b: second int
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b


@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.

    Args:
        a: first int
        b: second int
    """
    return a % b


@tool
def power(a: float, b: float) -> float:
    """Get the power of two numbers.

    Args:
        a (float): the first number
        b (float): the second number
    """
    return a**b


@tool
def square_root(a: float) -> float | complex:
    """Get the square root of a number. Returns a complex number for negative input.

    Args:
        a (float): the number to get the square root of
    """
    if a >= 0:
        return a**0.5
    return cmath.sqrt(a)


@tool
def wiki_search(query: str) -> dict:
    """Search Wikipedia for a query and return maximum 2 results.

    Args:
        query: The search query.
    """
    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ]
    )
    return {"wiki_results": formatted_search_docs}

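
# ArxivLoader is imported above but never used. Below is a minimal sketch of a
# matching arxiv_search tool in the same style as wiki_search, assuming the
# standard ArxivLoader(query=..., load_max_docs=...) interface; the tool name and
# the "Title" metadata key are illustrative assumptions, and the tool is
# intentionally not registered in the active `tools` list further down.
@tool
def arxiv_search(query: str) -> dict:
    """Search arXiv for a query and return maximum 2 results.

    Args:
        query: The search query.
    """
    search_docs = ArxivLoader(query=query, load_max_docs=2).load()
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document title="{doc.metadata.get("Title", "")}"/>\n{doc.page_content}\n</Document>'
            for doc in search_docs
        ]
    )
    return {"arxiv_results": formatted_search_docs}
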

@tool
def web_search(query: str) -> dict:
    """Search Tavily for a query and return maximum 3 results.

    Args:
        query: The search query.
    """
    # TavilySearchResults returns a list of plain dicts (with "url" and "content"),
    # not Document objects, so format those fields directly.
    search_docs = TavilySearchResults(max_results=3).invoke(query)
    formatted_search_docs = "\n\n---\n\n".join(
        [
            f'<Document source="{doc["url"]}"/>\n{doc["content"]}\n</Document>'
            for doc in search_docs
        ]
    )
    return {"web_results": formatted_search_docs}


with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

sys_msg = SystemMessage(content=system_prompt)


# Full tool set (only web_search is currently enabled below):
# tools = [
#     multiply,
#     add,
#     subtract,
#     divide,
#     modulus,
#     power,
#     square_root,
#     wiki_search,
#     web_search,
# ]
tools = [web_search]


def build_graph(provider: str = "google"):
    """Build the agent graph for the chosen LLM provider."""
    if provider == "huggingface":
        # Earlier attempts used serverless inference endpoints:
        # llm = ChatHuggingFace(
        #     llm=HuggingFaceEndpoint(
        #         # endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
        #         # endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen3-30B-A3B",
        #         endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B.Instruct",
        #         # endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen3-4B",
        #         temperature=0,
        #     ),
        # )
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
                task="text-generation",
                temperature=0,
            ),
        )
    elif provider == "google":
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    else:
        raise ValueError("Invalid provider. Choose 'google' or 'huggingface'.")

    # Bind the tool schemas to the model so it can emit structured tool calls.
    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
        """Assistant node: call the tool-enabled LLM on the system prompt plus message history."""
        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}

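    # Graph topology: START -> assistant; tools_condition routes to the tools node
    # when the last message contains tool calls (otherwise the run ends), and the
    # tools node loops back to the assistant with the tool results.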
    builder = StateGraph(MessagesState)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    builder.add_edge(START, "assistant")
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    return builder.compile()


if __name__ == "__main__":
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"

    # Default provider is "google"; override here to exercise the Hugging Face path.
    graph = build_graph(provider="huggingface")

    messages = [HumanMessage(content=question)]
    result = graph.invoke({"messages": messages})
    for m in result["messages"]:
        m.pretty_print()