import os
import logging
from typing import List, Dict, Any, Tuple
from langchain.tools import BaseTool
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_huggingface import HuggingFaceEndpoint
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition, ToolNode
from tools import WebSearchTool, WebContentTool, WikipediaSearchTool
#, AudioToTextTool, SpreadsheetParserTool, StringUtilitiesTool
import config
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[logging.StreamHandler()]
)
logger = logging.getLogger("Agent")
class LangGraphAgent:
    """
    Advanced agent built on LangGraph, backed by a HuggingFace-hosted
    chat model (see MODEL_REPO_ID) and a config-driven set of tools.
    """

    # Endpoint model served through the HuggingFace Inference API.
    # Hoisted to a class constant so subclasses can override it.
    MODEL_REPO_ID = "Qwen/Qwen2.5-Coder-32B-Instruct"

    def __init__(self, verbose: bool = True):
        """
        Initialize the agent with its LLM endpoint, tools, and workflow.

        Args:
            verbose: Kept for API compatibility; currently unused.
        """
        self.verbose = verbose
        # Remote text-generation endpoint; API key and sampling
        # temperature come from the project-level config module.
        self.llm = HuggingFaceEndpoint(
            repo_id=self.MODEL_REPO_ID,
            huggingfacehub_api_token=config.HUGGINGFACE_API_KEY,
            task="text-generation",
            temperature=config.TEMPERATURE,
        )
        # Load tools, then build the execution graph that uses them.
        self.tools = self._setup_tools()
        self.workflow = self._create_workflow()

    def _setup_tools(self) -> List[BaseTool]:
        """Build and return the tool list enabled by config feature flags."""
        tools: List[BaseTool] = []
        if config.ENABLE_WEB_SEARCH:
            tools.append(WebSearchTool())
            tools.append(WebContentTool())
        if config.ENABLE_WIKIPEDIA_SEARCH:
            tools.append(WikipediaSearchTool())
        # NOTE(review): audio-to-text, spreadsheet and string-utility tools
        # exist in the tools module but are not wired up yet.
        logger.info("Agent initialized with %d tools", len(tools))
        return tools

    def _create_workflow(self) -> StateGraph:
        """
        Create and compile the agent's execution graph.

        The graph loops between an `assistant` LLM node and a `tools`
        node until `tools_condition` routes to END (no tool call in the
        assistant's last message).
        """
        prompt = ChatPromptTemplate.from_messages([
            ("system", config.DEFAULT_SYSTEM_MESSAGE),
            MessagesPlaceholder(variable_name="chat_history"),
            ("human", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ])

        def assistant(state: MessagesState):
            # Everything but the last message is treated as history;
            # the last message's text is the current input.
            messages = prompt.format_messages(
                chat_history=state["messages"][:-1],
                input=state["messages"][-1].content,
                agent_scratchpad=[],
            )
            return {"messages": [self.llm.invoke(messages)]}

        builder = StateGraph(MessagesState)
        builder.add_node("assistant", assistant)
        builder.add_node("tools", ToolNode(self.tools))
        builder.add_edge(START, "assistant")
        # tools_condition routes to "tools" when the assistant emitted a
        # tool call, otherwise to END.
        builder.add_conditional_edges(
            "assistant",
            tools_condition,
        )
        builder.add_edge("tools", "assistant")
        return builder.compile()

    def __call__(self, question: str) -> str:
        """
        Answer a question using the agent.

        Args:
            question: The question to answer.

        Returns:
            The agent's answer, or an apology string on failure.
        """
        # Guard against None as well as empty/whitespace-only input
        # (the original crashed on None before reaching .strip()).
        if not question or not question.strip():
            return "Please ask a question."
        try:
            logger.info("Question received: %s...", question[:50])
            result = self.workflow.invoke({
                "messages": [HumanMessage(content=question)]
            })
            # The last message in the final state is the model's answer.
            return result["messages"][-1].content
        except Exception as e:
            # Top-level boundary: log the full traceback, return a
            # user-facing message instead of propagating.
            logger.exception("Error processing the question")
            return f"Sorry, an error occurred: {str(e)}"
def main() -> None:
    """Smoke-test the agent on two sample questions and print the answers."""
    agent = LangGraphAgent(verbose=True)
    questions = [
        "How many studio albums were published by Mercedes Sosa between 2000 and 2009 (included)? You can use the latest 2022 version of english wikipedia.",
        ".rewsna eht sa \"tfel\" drow eht fo etisoppo eht etirw ,ecnetnes siht dnatsrednu uoy fI",
    ]
    for question in questions:
        response = agent(question)
        print("\nQuestion:", question)
        print("\nAnswer:", response)


if __name__ == "__main__":
    main()