import time

from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from smolagents import CodeAgent, DuckDuckGoSearchTool, OpenAIServerModel, WikipediaSearchTool
class AdvancedAgent:
    def __init__(self):
        print("AdvancedAgent initialized.")
        # Initialize the language models and tools (assumes OPENAI_API_KEY is set)
        try:
            # LangChain chat model used by the answer chain below
            self.llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)
            # smolagents model and tools backing the code agent
            model = OpenAIServerModel(model_id="gpt-3.5-turbo", temperature=0.7)
            self.search_tool = DuckDuckGoSearchTool()
            wiki_search = WikipediaSearchTool()
            self.agent = CodeAgent(
                model=model,
                tools=[
                    self.search_tool,
                    wiki_search,
                ],
            )
        except Exception as e:
            print(f"Error initializing LLM: {e}")
            raise
        # Define prompt template for answering questions
        self.prompt_template = PromptTemplate(
            input_variables=["question", "context"],
            template="""
You are an intelligent agent designed to answer test questions accurately.
Use the provided context (if any) to inform your answer.
If the context is insufficient, rely on your knowledge or indicate if further information is needed.
Question: {question}
Context: {context}
Answer:
""",
        )
        self.chain = self.prompt_template | self.llm
    def __call__(self, question: str) -> str:
        print(f"Agent processing question (first 50 chars): {question[:50]}...")
        try:
            # Avoid rate limiting with a delay
            time.sleep(1)  # Adjust delay based on testing
            # Perform web search for supporting context (smolagents tools are callable)
            search_results = self.search_tool(question)
            if not search_results:
                context = "No search results found."
            else:
                context = search_results[:1000]  # Limit context length
            # Generate answer using the LLM and the retrieved context
            response = self.chain.invoke({"question": question, "context": context})
            answer = response.content.strip()  # ChatOpenAI returns an AIMessage
            print(f"Agent generated answer: {answer[:50]}...")
            return answer
        except Exception as e:
            print(f"Error processing question: {e}")
            return f"Error: Unable to process question - {e}"