import os
from typing import Any, List, Optional

from llama_index.core.agent.workflow import ReActAgent
from llama_index.core.llms import LLM
from llama_index.llms.anthropic import Anthropic
from llama_index.llms.openai import OpenAI

from tools.web_tools import (
    tavily_tool,
    wikipedia_tool,
)


class GaiaAgent(ReActAgent):
    """
    A flexible ReActAgent for GAIA benchmark tasks that supports multiple LLM providers.
    
    This agent coordinates specialized sub-agents to solve diverse benchmark tasks,
    with precise output formatting as specified in the GAIA benchmark.
    """
    
    def __init__(
        self,
        model_provider: str = "openai",
        model_name: str = "gpt-4o",
        api_key: Optional[str] = None,
        system_prompt: Optional[str] = None,
        tools: Optional[List[Any]] = None,
        name: str = "jefe",
        description: str = "Master coordinator agent for GAIA benchmark tasks",
        llm: Optional[LLM] = None,
        **kwargs
    ):
        """
        Initialize a GaiaAgent with flexible model configuration.
        
        Args:
            model_provider: The LLM provider to use ("openai" or "anthropic")
            model_name: The specific model name to use
            api_key: API key for the provider (defaults to environment variable)
            system_prompt: Custom system prompt (defaults to GAIA benchmark prompt)
            tools: List of tools to make available to the agent
            name: Name of the agent
            description: Description of the agent
            llm: Pre-configured LLM instance (if provided, model_provider and model_name are ignored)
            **kwargs: Additional parameters to pass to ReActAgent
        """
        # Deferred import of the text-reversal tool (kept local to __init__).
        from tools.text_tools import reverse_text_tool
        
        # Use pre-configured LLM if provided, otherwise initialize based on provider
        if llm is None:
            llm = self._initialize_llm(model_provider, model_name, api_key)
        
        # Use default tools if not provided
        if tools is None:
            tools = [
                reverse_text_tool,
                wikipedia_tool,
                tavily_tool,
            ]
            
        # Use default system prompt if not provided
        if system_prompt is None:
            system_prompt = self._get_default_system_prompt()
            
        # Initialize the parent ReActAgent
        super().__init__(
            name=name,
            description=description,
            llm=llm,
            system_prompt=system_prompt,
            tools=tools,
            **kwargs
        )
        
    def _initialize_llm(self, model_provider: str, model_name: str, api_key: Optional[str]) -> LLM:
        """Initialize the appropriate LLM based on the provider."""
        model_provider = model_provider.lower()
        
        if model_provider == "openai":
            return OpenAI(model=model_name, api_key=api_key or os.getenv("OPENAI_API_KEY"))
            
        elif model_provider == "anthropic":
            return Anthropic(model=model_name, api_key=api_key or os.getenv("ANTHROPIC_API_KEY"))

        else:
            raise ValueError(
                f"Unsupported model provider: {model_provider}. "
                "Supported providers are: openai, anthropic."
            )
                            
    def _get_default_system_prompt(self) -> str:
        """Return the default system prompt for GAIA benchmark tasks."""
        return """
        You are the lead coordinator for a team of specialized AI agents tackling the GAIA benchmark. Your job is to analyze each question with extreme precision, determine the exact format required for the answer, break the task into logical steps, and either solve it yourself or delegate to the appropriate specialized agents.

        ## QUESTION ANALYSIS PROCESS
        1. First, carefully read and parse the entire question
        2. Identify the EXACT output format required (single word, name, number, comma-separated list, etc.)
        3. Note any special formatting requirements (alphabetical order, specific notation, etc.)
        4. Identify what type of task this is (research, audio analysis, video analysis, code execution, data analysis, etc.)
        5. Break the question into sequential steps

        ## DELEGATION GUIDELINES
        - video_analyst: Use for all YouTube video analysis, visual content identification, or scene description
        - audio_analyst: Use for transcribing audio files, identifying speakers, or extracting information from recordings
        - researcher: Use for factual queries, literature searches, finding specific information in papers or websites
        - code_analyst: Use for executing, debugging or analyzing code snippets
        - excel_analyst: Use for analyzing spreadsheets, calculating values, or extracting data from Excel files

        ## CRITICAL RESPONSE RULES
        - NEVER include explanations in your final answer
        - NEVER include phrases like "the answer is" or "the result is"
        - Return EXACTLY what was asked for - no more, no less
        - If asked for a name, return ONLY the name
        - If asked for a number, return ONLY the number
        - If asked for a list, format it EXACTLY as specified (comma-separated, alphabetical, etc.)
        - Double-check your answer against the exact output requirements before submitting

        ## EXAMPLES OF PROPER RESPONSES:
        Question: "What is the first name of the scientist who discovered penicillin?"
        Correct answer: Alexander

        Question: "List the prime numbers between 10 and 20 in ascending order."
        Correct answer: 11, 13, 17, 19

        Question: "If you understand this sentence, write the opposite of the word 'right' as the answer."
        Correct answer: left

        Question: "How many at bats did the Yankee with the most walks in the 1977 regular season have that same season?"
        Correct answer: 572

        For questions with reverse text:
        1. Use your reverse_text_tool to process the text
        2. Understand the instruction in the reversed text
        3. Follow the instruction exactly

        After you have the final answer, verify one last time that it meets ALL formatting requirements from the question before submitting.

        IMPORTANT: Your value is in providing PRECISELY what was asked for - not in showing your work or explaining how you got there.
        """