Create agent.py
agent.py
ADDED
from langchain_community.tools import DuckDuckGoSearchRun
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import LLMChain


class AdvancedAgent:
    def __init__(self):
        print("AdvancedAgent initialized.")
        # Initialize the language model (assumes OPENAI_API_KEY is set)
        try:
            self.llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.7)
        except Exception as e:
            print(f"Error initializing LLM: {e}")
            raise

        # Initialize the web search tool
        self.search_tool = DuckDuckGoSearchRun()

        # Define the prompt template for answering questions
        self.prompt_template = PromptTemplate(
            input_variables=["question", "context"],
            template="""
You are an intelligent agent designed to answer test questions accurately.
Use the provided context (if any) to inform your answer.
If the context is insufficient, rely on your own knowledge or state that further information is needed.
Question: {question}
Context: {context}
Answer:
""",
        )
        self.chain = LLMChain(llm=self.llm, prompt=self.prompt_template)

    def __call__(self, question: str) -> str:
        print(f"Agent processing question (first 50 chars): {question[:50]}...")
        try:
            # Perform a web search to gather context for the question
            search_results = self.search_tool.run(question)
            context = search_results[:1000]  # Limit context length

            # Generate an answer from the question and the retrieved context
            response = self.chain.run(question=question, context=context)
            print(f"Agent generated answer: {response[:50]}...")
            return response.strip()
        except Exception as e:
            print(f"Error processing question: {e}")
            return f"Error: Unable to process question - {e}"
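
Note that LLMChain and langchain_community.chat_models.ChatOpenAI are deprecated in recent LangChain releases. A hedged sketch of the same prompt-then-model pipeline in the runnable (LCEL) style, assuming langchain-openai is installed; the question and context values are placeholders:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

prompt = PromptTemplate.from_template(
    "Question: {question}\nContext: {context}\nAnswer:"
)
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)
chain = prompt | llm | StrOutputParser()  # prompt -> model -> plain string
answer = chain.invoke({"question": "example question", "context": "example context"})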