# Imports
import os
from datetime import datetime
from typing import Annotated, TypedDict

import gradio as gr
from dotenv import load_dotenv
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_core.messages import AIMessage, AnyMessage, HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from langgraph.graph import START, StateGraph
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition

load_dotenv()

# Fetch from the Space's secrets (previously added). Guard against a missing
# secret: os.environ values must be strings, so assigning None would raise.
openai_api_key = os.getenv("OPENAI_API_KEY")
if openai_api_key:
    os.environ["OPENAI_API_KEY"] = openai_api_key
# LLM Setup
# llm = ChatOpenAI(model="gpt-4.1")
llm = ChatGroq(
    model="llama3-70b-8192",
    api_key=os.getenv("GROQ_API_KEY"),
    temperature=0.0,
    max_tokens=1000,
    # top_p and the penalty settings are not first-class ChatGroq fields;
    # pass them through to the underlying Groq API via model_kwargs.
    model_kwargs={"top_p": 1.0, "frequency_penalty": 0.0, "presence_penalty": 0.0},
)
# Tools to be used by the LLM. The @tool decorator (imported above but missing
# from the original definitions) turns each function into a LangChain tool,
# using its docstring as the tool description.
@tool
def add(a: float, b: float) -> float:
    """Add two numbers."""
    return a + b

@tool
def subtract(a: float, b: float) -> float:
    """Subtract the second number from the first."""
    return a - b

@tool
def multiply(a: float, b: float) -> float:
    """Multiply two numbers."""
    return a * b

@tool
def divide(a: float, b: float) -> float:
    """Divide the first number by the second."""
    if b == 0:
        raise ValueError("Division by zero.")
    return a / b

@tool
def get_current_time() -> str:
    """Get the current date and time."""
    return datetime.now().isoformat()

search = DuckDuckGoSearchResults()

# Tool List
tools = [add, subtract, multiply, divide, get_current_time, search]
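# Illustrative note (not in the original script): once decorated with @tool,
# each function is a Runnable and can be sanity-checked directly, e.g.
#   add.invoke({"a": 2, "b": 3})   # -> 5.0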
# Bind LLM with Tools. parallel_tool_calls is forwarded to the underlying
# chat-completions request, letting the model ask for several tool calls at once.
llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=True)
# Class to hold the state passed through the graph
class AgentState(TypedDict):
    messages: Annotated[list[AnyMessage], add_messages]

# Define the Assistant Node
def assistant(state: AgentState) -> AgentState:
    response = llm_with_tools.invoke(state["messages"])
    # Return only the new message: the add_messages reducer appends it to the
    # existing history, so prepending the old messages again is redundant.
    return {"messages": [response]}
# Graph
builder = StateGraph(AgentState)

# Define nodes: these do the work
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))

# Define edges: these determine how the control flow moves
builder.add_edge(START, "assistant")
builder.add_conditional_edges(
    "assistant",
    # If the latest message from the assistant is a tool call -> tools_condition routes to tools
    # If the latest message from the assistant is not a tool call -> tools_condition routes to END
    tools_condition,
)
builder.add_edge("tools", "assistant")
react_graph = builder.compile()
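# Optional sanity check (illustrative, not part of the original; uncomment once
# the API key is set). It pushes one question through the compiled graph and
# prints the full message history, including any intermediate tool calls:
# result = react_graph.invoke({"messages": [HumanMessage(content="What is 12 * 7?")]})
# for m in result["messages"]:
#     m.pretty_print()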
# Helper function to find the last AI message/response in the conversation
def final_ai_message(messages: list) -> str | None:
    # Walk the history backwards and return the first AIMessage found
    for message in reversed(messages):
        if isinstance(message, AIMessage):
            return message.content
    return None
sys_prompt = (
    "You are a general AI assistant. I will ask you a question. Report your thoughts, and "
    "finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. "
    "YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated "
    "list of numbers and/or strings. "
    "If you are asked for a number, don't write it with commas or units such as $ or a "
    "percent sign unless specified otherwise. "
    "If you are asked for a string, don't use articles or abbreviations (e.g. for cities), "
    "and write digits in plain text unless specified otherwise. "
    "If you are asked for a comma-separated list, apply the above rules depending on whether "
    "each element is a number or a string."
)
# Create a function to interact with the graph
def chat_with_agent(user_input):
    inputs = {
        "messages": [
            SystemMessage(content=sys_prompt),
            HumanMessage(content=user_input),
        ]
    }
    # Run the graph
    state = react_graph.invoke(inputs)
    final_ai_message_text = final_ai_message(state["messages"])
    return final_ai_message_text if final_ai_message_text else "Sorry, I couldn't find a response."
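
# Gradio is imported above but the UI wiring is not shown in this section; a
# minimal sketch of how chat_with_agent could be exposed (the `demo` name,
# labels, and title are assumptions, not from the original):
demo = gr.Interface(
    fn=chat_with_agent,
    inputs=gr.Textbox(label="Your question"),
    outputs=gr.Textbox(label="Agent answer"),
    title="LangGraph Tool-Calling Agent",
)

if __name__ == "__main__":
    demo.launch()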