# NOTE(review): removed copy/paste artifacts ("Spaces:" / "Running") that were not valid Python.
from langchain_core.messages import HumanMessage, AIMessage | |
from langchain_community.tools import DuckDuckGoSearchRun | |
from langchain.prompts import PromptTemplate | |
from langchain_community.chat_models import ChatOpenAI | |
from langchain.chains import LLMChain | |
import os | |
class AdvancedAgent:
    """Question-answering agent that augments an LLM with web-search context.

    Pipeline per question: run a DuckDuckGo search, truncate the results to a
    bounded context window, then ask the chat model to answer using that
    context via a fixed prompt template.
    """

    def __init__(self):
        """Build the LLM, the search tool, and the prompt chain.

        Raises:
            Exception: whatever ``ChatOpenAI`` raises on construction
                (e.g. missing/invalid ``OPENAI_API_KEY``), after logging it.
        """
        print("AdvancedAgent initialized.")
        # Initialize the language model (assumes OPENAI_API_KEY is set)
        try:
            self.llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)
        except Exception as e:
            print(f"Error initializing LLM: {e}")
            # Bare `raise` re-raises the active exception with its original
            # traceback intact (`raise e` would restart the traceback here).
            raise
        # Web search tool used to gather context at answer time.
        self.search_tool = DuckDuckGoSearchRun()
        # Prompt template for answering questions; the template string is a
        # runtime value and is kept verbatim.
        self.prompt_template = PromptTemplate(
            input_variables=["question", "context"],
            template="""
            You are an intelligent agent designed to answer test questions accurately.
            Use the provided context (if any) to inform your answer.
            If the context is insufficient, rely on your knowledge or indicate if further information is needed.
            Question: {question}
            Context: {context}
            Answer:
            """,
        )
        self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)

    def __call__(self, question: str) -> str:
        """Answer ``question`` using web-search context.

        Args:
            question: The question to answer; also used verbatim as the
                search query.

        Returns:
            The stripped LLM answer, or an ``"Error: ..."`` string if any
            step fails (search, LLM call) — this method never raises.
        """
        print(f"Agent processing question (first 50 chars): {question[:50]}...")
        try:
            # The question itself is the search query; the previous
            # f"{question}" wrapper was a no-op and has been dropped.
            search_results = self.search_tool.run(question)
            # Cap the context so the prompt stays within a reasonable size.
            context = search_results[:1000]
            # Generate the answer using the LLM and the gathered context.
            response = self.chain.run(question=question, context=context)
            print(f"Agent generated answer: {response[:50]}...")
            return response.strip()
        except Exception as e:
            # Deliberate best-effort boundary: log and return an error string
            # rather than propagating, matching the original contract.
            print(f"Error processing question: {e}")
            return f"Error: Unable to process question - {e}"