import os
import logging
from typing import List, Dict, Any, Tuple

from langchain.tools import BaseTool
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode

from tools import WebSearchTool, WebContentTool, WikipediaSearchTool
# , AudioToTextTool, SpreadsheetParserTool, StringUtilitiesTool
import config
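
# The imported `config` module is assumed to expose the attributes referenced
# in this file. A minimal sketch of such a config.py (attribute names taken
# from the usages below; the values shown are illustrative, not the project's
# actual settings):
#
#     import os
#
#     HUGGINGFACE_API_KEY = os.environ.get("HUGGINGFACE_API_KEY", "")
#     TEMPERATURE = 0.1
#     DEFAULT_SYSTEM_MESSAGE = "You are a helpful assistant. ..."
#     ENABLE_WEB_SEARCH = True
#     ENABLE_WIKIPEDIA_SEARCH = True
#     ENABLE_AUDIO_TO_TEXT = False
#     ENABLE_SPREADSHEET_PARSER = False
#     ENABLE_STRING_UTILITIES = False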

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger("Agent")


class LangGraphAgent:
    """
    Advanced agent using LangGraph and the Qwen/Qwen2.5-Coder-32B-Instruct model.
    """

    def __init__(self, verbose: bool = True):
        """
        Initialize the agent with its tools and configuration.
        """
        self.verbose = verbose

        # Initialize the model. The raw text-generation endpoint is wrapped in
        # ChatHuggingFace so the graph exchanges chat messages and the model
        # can emit tool calls (assuming the hosted model supports tool calling).
        self.llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
                huggingfacehub_api_token=config.HUGGINGFACE_API_KEY,
                task="text-generation",
                temperature=config.TEMPERATURE,
            )
        )

        # Load tools
        self.tools = self._setup_tools()

        # Bind the tools to the model so the assistant node can request them
        self.llm_with_tools = self.llm.bind_tools(self.tools)

        # Create the execution graph
        self.workflow = self._create_workflow()

    def _setup_tools(self) -> List[BaseTool]:
        """Configure and return the available tools for the agent"""
        tools = []

        # Add tools according to configuration
        if config.ENABLE_WEB_SEARCH:
            tools.append(WebSearchTool())
            tools.append(WebContentTool())
        if config.ENABLE_WIKIPEDIA_SEARCH:
            tools.append(WikipediaSearchTool())
        # if config.ENABLE_AUDIO_TO_TEXT:
        #     tools.append(AudioToTextTool())
        # if config.ENABLE_SPREADSHEET_PARSER:
        #     tools.append(SpreadsheetParserTool())
        # if config.ENABLE_STRING_UTILITIES:
        #     tools.append(StringUtilitiesTool())

        logger.info(f"Agent initialized with {len(tools)} tools")
        return tools
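
    # The tools imported from tools.py are assumed to be LangChain BaseTool
    # subclasses. A minimal sketch of the expected shape (hypothetical
    # example, not the project's actual implementation):
    #
    #     class WikipediaSearchTool(BaseTool):
    #         name: str = "wikipedia_search"
    #         description: str = "Search Wikipedia and return a short summary."
    #
    #         def _run(self, query: str) -> str:
    #             import wikipedia
    #             return wikipedia.summary(query, sentences=3)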

    def _create_workflow(self) -> StateGraph:
        """Create the agent's execution graph"""
        # Define the prompt: the system message followed by the full running
        # conversation (question, assistant tool calls, tool results)
        prompt = ChatPromptTemplate.from_messages([
            ("system", config.DEFAULT_SYSTEM_MESSAGE),
            MessagesPlaceholder(variable_name="messages"),
        ])

        # Define the nodes
        def assistant(state: MessagesState):
            # Format the accumulated messages and call the tool-bound model
            messages = prompt.format_messages(messages=state["messages"])
            return {"messages": [self.llm_with_tools.invoke(messages)]}

        # Create the graph
        builder = StateGraph(MessagesState)

        # Add nodes
        builder.add_node("assistant", assistant)
        builder.add_node("tools", ToolNode(self.tools))

        # Add edges: tools_condition routes to "tools" when the assistant
        # requested a tool, otherwise to END
        builder.add_edge(START, "assistant")
        builder.add_conditional_edges(
            "assistant",
            tools_condition,
        )
        builder.add_edge("tools", "assistant")

        # Compile the graph
        return builder.compile()
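
    # The compiled graph loops START -> assistant -> (tools -> assistant)* -> END.
    # For debugging, one way to inspect the topology (assuming the optional
    # graph-drawing extras of LangGraph are installed) is:
    #
    #     agent = LangGraphAgent()
    #     print(agent.workflow.get_graph().draw_ascii())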

    def __call__(self, question: str) -> str:
        """
        Answer a question using the agent.

        Args:
            question: The question to answer

        Returns:
            The agent's answer
        """
        if not question.strip():
            return "Please ask a question."

        try:
            logger.info(f"Question received: {question[:50]}...")

            # Execute the workflow
            result = self.workflow.invoke({
                "messages": [HumanMessage(content=question)]
            })

            # Extract the final answer
            final_message = result["messages"][-1].content
            # if "FINAL ANSWER:" in final_message:
            #     return final_message.split("FINAL ANSWER:")[1].strip()
            return final_message
        except Exception as e:
            logger.error(f"Error processing the question: {str(e)}")
            return f"Sorry, an error occurred: {str(e)}"


if __name__ == "__main__":
    # Create an instance of the agent
    agent = LangGraphAgent(verbose=True)

    # Question to test
    question = "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia."

    # Get the answer
    response = agent(question)

    # Display the answer
    print("\nQuestion:", question)
    print("\nAnswer:", response)

    # Question to test
    question = ".rewsna eht sa \"tfel\" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI"

    # Get the answer
    response = agent(question)

    # Display the answer
    print("\nQuestion:", question)
    print("\nAnswer:", response)