Spaces:
Running
Running
File size: 2,088 Bytes
cd7207e 926b5b0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
from langchain_core.messages import HumanMessage, AIMessage
from langchain_community.tools import DuckDuckGoSearchRun
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain
import os
class AdvancedAgent:
    """LLM-backed question-answering agent that augments answers with web search.

    On construction it wires up a ChatOpenAI model (requires the
    OPENAI_API_KEY environment variable), a DuckDuckGo search tool, and an
    LLMChain built from a fixed answer-generation prompt. Calling the
    instance with a question runs a web search for context, then asks the
    LLM to answer using that context.
    """

    def __init__(self):
        print("AdvancedAgent initialized.")
        # Initialize the language model (assumes OPENAI_API_KEY is set).
        try:
            self.llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)
        except Exception as e:
            print(f"Error initializing LLM: {e}")
            # Bare `raise` re-raises the original exception with its
            # traceback intact (``raise e`` would reset the traceback origin).
            raise
        # Web search tool used to gather context for each question.
        self.search_tool = DuckDuckGoSearchRun()
        # Prompt template: the LLM answers {question} using {context}
        # gathered from the search step.
        self.prompt_template = PromptTemplate(
            input_variables=["question", "context"],
            template="""
You are an intelligent agent designed to answer test questions accurately.
Use the provided context (if any) to inform your answer.
If the context is insufficient, rely on your knowledge or indicate if further information is needed.
Question: {question}
Context: {context}
Answer:
""",
        )
        self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)

    def __call__(self, question: str) -> str:
        """Answer `question` using web-search context plus the LLM.

        Returns the LLM's answer with surrounding whitespace stripped, or a
        human-readable ``"Error: ..."`` string if any step fails — this
        best-effort contract (never raising) is deliberate, so callers can
        always treat the result as a plain string.
        """
        print(f"Agent processing question (first 50 chars): {question[:50]}...")
        try:
            # Perform a web search to gather context; the question itself
            # is the query (the former f-string copy was redundant).
            search_results = self.search_tool.run(question)
            # Cap the context so the prompt stays within model limits.
            context = search_results[:1000]
            # Generate the answer using the LLM chain.
            response = self.chain.run(question=question, context=context)
            print(f"Agent generated answer: {response[:50]}...")
            return response.strip()
        except Exception as e:
            print(f"Error processing question: {e}")
            return f"Error: Unable to process question - {e}"
``` |