|
import os |
|
import logging |
|
from typing import List, Dict, Any, Tuple |
|
from langchain.tools import BaseTool |
|
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage |
|
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder |
|
from langchain_core.output_parsers import StrOutputParser |
|
from langchain_core.runnables import RunnablePassthrough |
|
from langchain_huggingface import HuggingFaceEndpoint |
|
from langgraph.graph import START, StateGraph, MessagesState |
|
from langgraph.prebuilt import tools_condition, ToolNode |
|
|
|
from tools import WebSearchTool, WebContentTool, WikipediaSearchTool |
|
|
|
|
|
import config |
|
|
|
|
|
logging.basicConfig( |
|
level=logging.INFO, |
|
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', |
|
handlers=[logging.StreamHandler()] |
|
) |
|
logger = logging.getLogger("Agent") |
|
|
|
class LangGraphAgent:
    """
    LangGraph-based agent backed by the Qwen/Qwen2.5-Coder-32B-Instruct
    Hugging Face text-generation endpoint.

    Wires a chat prompt, the configured web/Wikipedia tools, and a
    LangGraph state machine (assistant <-> tools) into a single callable
    that maps a question string to an answer string.
    """

    def __init__(self, verbose: bool = True):
        """
        Initialize the agent: remote LLM endpoint, tools, and execution graph.

        Args:
            verbose: Kept for interface compatibility; currently only stored.
        """
        self.verbose = verbose

        # Remote text-generation endpoint.
        # NOTE(review): HuggingFaceEndpoint is a plain completion LLM
        # (string in / string out); it never emits structured `tool_calls`,
        # so `tools_condition` in the graph below will normally route
        # straight to END. Wrapping it in ChatHuggingFace + bind_tools would
        # enable real tool use -- confirm intended behavior.
        self.llm = HuggingFaceEndpoint(
            repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            huggingfacehub_api_token=config.HUGGINGFACE_API_KEY,
            task="text-generation",
            temperature=config.TEMPERATURE,
        )

        # Tools must exist before the graph, since ToolNode wraps them.
        self.tools = self._setup_tools()
        self.workflow = self._create_workflow()

    def _setup_tools(self) -> List[BaseTool]:
        """Configure and return the tools enabled by the config flags.

        Returns:
            The list of tool instances selected by ``config.ENABLE_*`` flags;
            may be empty if every flag is off.
        """
        tools: List[BaseTool] = []

        if config.ENABLE_WEB_SEARCH:
            tools.append(WebSearchTool())
            tools.append(WebContentTool())

        if config.ENABLE_WIKIPEDIA_SEARCH:
            tools.append(WikipediaSearchTool())

        logger.info(f"Agent initialized with {len(tools)} tools")
        return tools

    def _create_workflow(self) -> StateGraph:
        """Create and compile the agent's execution graph.

        Returns:
            The compiled LangGraph runnable (assistant node, tools node,
            conditional routing from assistant, tools looping back).
        """
        prompt = ChatPromptTemplate.from_messages([
            ("system", config.DEFAULT_SYSTEM_MESSAGE),
            MessagesPlaceholder(variable_name="chat_history"),
            ("human", "{input}"),
            # Scratchpad is always passed empty below; kept for prompt shape.
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])

        def assistant(state: MessagesState):
            # Everything before the last message is treated as history;
            # the last message is the fresh user input.
            messages = prompt.format_messages(
                chat_history=state["messages"][:-1],
                input=state["messages"][-1].content,
                agent_scratchpad=[],
            )
            return {"messages": [self.llm.invoke(messages)]}

        builder = StateGraph(MessagesState)

        builder.add_node("assistant", assistant)
        builder.add_node("tools", ToolNode(self.tools))

        builder.add_edge(START, "assistant")
        # Routes to "tools" when the last message carries tool calls,
        # otherwise to END (see NOTE in __init__ about this endpoint).
        builder.add_conditional_edges(
            "assistant",
            tools_condition,
        )
        builder.add_edge("tools", "assistant")

        return builder.compile()

    def __call__(self, question: str) -> str:
        """
        Answer a question using the agent.

        Args:
            question: The question to answer.

        Returns:
            The agent's answer, a prompt to ask a question if the input is
            blank, or an apology string if the workflow raises.
        """
        if not question.strip():
            return "Please ask a question."

        try:
            logger.info(f"Question received: {question[:50]}...")

            result = self.workflow.invoke({
                "messages": [HumanMessage(content=question)]
            })

            # The graph accumulates messages; the final one is the answer.
            return result["messages"][-1].content

        except Exception as e:
            # logger.exception records the full traceback, which
            # logger.error with only str(e) was losing.
            logger.exception("Error processing the question")
            return f"Sorry, an error occurred: {str(e)}"
|
|
|
if __name__ == "__main__":
    # Manual smoke test: run the agent on two GAIA-style sample questions
    # and print each question/answer pair.
    agent = LangGraphAgent(verbose=True)

    sample_questions = [
        "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.",
        ".rewsna eht sa \"tfel\" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI",
    ]

    for question in sample_questions:
        response = agent(question)
        print("\nQuestion:", question)
        # Label normalized to English ("Answer:") -- the second print
        # previously used the French "Réponse:", inconsistent with the first.
        print("\nAnswer:", response)
|
|