File size: 2,478 Bytes
cd7207e
 
8098850
26cbef0
e7ec655
cd7207e
 
 
 
 
 
 
e7ec655
 
 
 
 
 
 
 
 
 
 
cd7207e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26cbef0
cd7207e
 
 
 
26cbef0
 
bb5a371
26cbef0
 
 
 
 
 
cd7207e
 
26cbef0
cd7207e
 
 
 
1882e0e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
import os
import time

from langchain.prompts import PromptTemplate
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.runnables import RunnableSequence
from langchain_openai import ChatOpenAI
from smolagents import CodeAgent, DuckDuckGoSearchTool, OpenAIServerModel, WikipediaSearchTool

class AdvancedAgent:
    """Question-answering agent.

    Combines a smolagents ``CodeAgent`` (wired with DuckDuckGo and Wikipedia
    search tools) with a LangChain prompt -> LLM chain. ``__call__`` runs a web
    search for the question, then asks the LLM to answer using the search
    results as context.

    Requires the ``OPENAI_API_KEY`` environment variable to be set.
    """

    def __init__(self):
        print("AdvancedAgent initialized.")
        # Initialize the language model (assumes OPENAI_API_KEY is set)
        try:
            model = OpenAIServerModel(model_id="gpt-3.5-turbo", temperature=0.7)
            # Stored on self because __call__ runs searches directly
            # (the original stashed this only in a local, so __call__ crashed).
            self.search_tool = DuckDuckGoSearchTool()
            wiki_search = WikipediaSearchTool()

            self.agent = CodeAgent(
                model=model,
                tools=[
                    self.search_tool,
                    wiki_search
                ]
            )

            # LangChain chat model for the prompt chain below. The original
            # referenced self.llm without ever assigning it (AttributeError);
            # ChatOpenAI was imported for exactly this purpose.
            self.llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)
        except Exception as e:
            print(f"Error initializing LLM: {e}")
            # Bare raise preserves the original traceback.
            raise

        # Define prompt template for answering questions
        self.prompt_template = PromptTemplate(
            input_variables=["question", "context"],
            template="""
            You are an intelligent agent designed to answer test questions accurately.
            Use the provided context (if any) to inform your answer.
            If the context is insufficient, rely on your knowledge or indicate if further information is needed.
            Question: {question}
            Context: {context}
            Answer: 
            """
        )
        # Runnable pipeline: fill the template, then invoke the chat model.
        self.chain = self.prompt_template | self.llm

    def __call__(self, question: str) -> str:
        """Answer *question* using web-search context; returns the answer text.

        On any failure, returns an ``"Error: ..."`` string instead of raising.
        """
        print(f"Agent processing question (first 50 chars): {question[:50]}...")
        try:
            # Avoid rate limiting with a delay
            time.sleep(1)  # Adjust delay based on testing

            # Perform web search
            search_results = self.search_tool.run(question)
            if not search_results:
                context = "No search results found."
            else:
                context = search_results[:1000]  # Limit context length

            # Generate answer using LLM and context. ChatOpenAI returns an
            # AIMessage, not a str — extract .content before slicing/stripping
            # (the original sliced the message object, a TypeError).
            response = self.chain.invoke({"question": question, "context": context})
            answer = response.content if hasattr(response, "content") else str(response)
            print(f"Agent generated answer: {answer[:50]}...")
            return answer.strip()
        except Exception as e:
            print(f"Error processing question: {e}")
            return f"Error: Unable to process question - {e}"