drAbreu committed on
Commit
29179b5
·
1 Parent(s): 016d20c

added model selection to main agent class

Browse files
Files changed (2) hide show
  1. agents/llama_index_agent.py +129 -53
  2. app.py +34 -6
agents/llama_index_agent.py CHANGED
@@ -1,65 +1,141 @@
1
- from llama_index.core.agent.workflow import (
2
- AgentWorkflow,
3
- ReActAgent,
4
- FunctionAgent
5
- )
6
- from tools.text_tools import reverse_text_tool
7
  from llama_index.llms.openai import OpenAI
8
- import os
9
 
10
- openai = OpenAI(model="gpt-4o", api_key=os.getenv("OPENAI_API_KEY"))
11
-
12
- main_agent = ReActAgent(
13
- name="jefe",
14
- description="Agent that will receive the queries, understand them, and send them to the correct agents to do the job",
15
- llm=openai,
16
- system_prompt="""
17
- You are a ReActAgent that has a team of AI agents available to solve
18
- questions and challenges from the GAIA Benchmark.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
 
20
- You must very carefully read the questions, understand them, and divide them into steps.
21
- You can then either answer the steps on your own or distribute them to the most relevant
22
- agents in your team to find the answer for you.
 
 
 
23
 
24
- At the end, once you gather
25
-
26
- The questions will be given to you following the format:
27
- ```
28
- {
29
- 'task_id': '5a0c1adf-205e-4841-a666-7c3ef95def9d',
30
- 'question': 'What is the first name of the only Malko Competition recipient from the 20th Century (after 1977) whose nationality on record is a country that no longer exists?',
31
- 'Level': '1',
32
- 'file_name': ''
33
- }
34
- ```
35
 
36
- If the question has a file attached, the other agents in your team will have the tools to open and
37
- analyze them.
 
 
 
 
 
 
38
 
39
- Once you have all the intermediate steps and you can provide the final answer, make sure that
40
- you are doing so EXACTLY as the answer format is defined in the query.
 
41
 
42
- You also have access to your own tools:
43
- * `reverse_text_tool` --> Reverses the input text
44
 
45
- Send as final answer your last answer formated as expected in the instructions of the question.
 
46
 
47
- IMPORTANT:
48
- Answer ONLY the minimal text that is asked in the query. Not a single character more.
49
 
50
- For example:
 
 
 
51
 
52
- If you are asked for the first name of the discoverer of gravity, your answer should be: `Isaac`
53
 
54
- """,
55
- can_handoff_to=[
56
- "video_analyst",
57
- "audio_analyst",
58
- "researcher",
59
- "code_analyst",
60
- "excel_analyst"
61
- ],
62
- tools=[
63
- reverse_text_tool
64
- ]
65
- )
 
1
+ from llama_index.core.agent.workflow import ReActAgent
2
+ from llama_index.core.llms import LLM
3
+ import os
4
+ from typing import Optional, List, Any
 
 
5
  from llama_index.llms.openai import OpenAI
6
+ from llama_index.llms.anthropic import Anthropic
7
 
8
class GaiaAgent(ReActAgent):
    """
    A flexible ReActAgent for GAIA benchmark tasks that supports multiple LLM providers.

    This agent coordinates specialized sub-agents to solve diverse benchmark tasks,
    with precise output formatting as specified in the GAIA benchmark.
    """

    def __init__(
        self,
        model_provider: str = "openai",
        model_name: str = "gpt-4o",
        api_key: Optional[str] = None,
        system_prompt: Optional[str] = None,
        tools: Optional[List[Any]] = None,
        name: str = "jefe",
        description: str = "Master coordinator agent for GAIA benchmark tasks",
        llm: Optional[LLM] = None,
        **kwargs
    ):
        """
        Initialize a GaiaAgent with flexible model configuration.

        Args:
            model_provider: The LLM provider to use ("openai", "anthropic", "cohere", etc.)
            model_name: The specific model name to use
            api_key: API key for the provider (defaults to environment variable)
            system_prompt: Custom system prompt (defaults to GAIA benchmark prompt)
            tools: List of tools to make available to the agent
            name: Name of the agent
            description: Description of the agent
            llm: Pre-configured LLM instance (if provided, model_provider and model_name are ignored)
            **kwargs: Additional parameters to pass to ReActAgent

        Raises:
            ValueError: If ``model_provider`` is not one of the supported providers.
        """
        # Imported lazily so importing this module never requires the tools
        # package at load time (and avoids potential circular imports).
        from tools.text_tools import reverse_text_tool

        # Build an LLM from the provider settings unless a pre-configured one
        # was supplied.  NOTE: ReActAgent is pydantic-based, so instance state
        # is not available before super().__init__() — both helpers below are
        # staticmethods precisely so they are safe to call here.
        if llm is None:
            llm = self._initialize_llm(model_provider, model_name, api_key)

        # Default toolbox: only the text-reversal tool; sub-agents carry the
        # file/media-specific tools.
        if tools is None:
            tools = [reverse_text_tool]

        if system_prompt is None:
            system_prompt = self._get_default_system_prompt()

        # Initialize the parent ReActAgent with the resolved configuration.
        super().__init__(
            name=name,
            description=description,
            llm=llm,
            system_prompt=system_prompt,
            tools=tools,
            **kwargs,
        )

    @staticmethod
    def _initialize_llm(model_provider: str, model_name: str, api_key: Optional[str]) -> LLM:
        """
        Return an LLM instance for the requested provider.

        Providers other than openai/anthropic are imported lazily so their
        optional dependencies are only required when actually used.  API keys
        fall back to the provider's conventional environment variable.

        Raises:
            ValueError: If the provider string is not recognized.
        """
        provider = model_provider.lower()

        if provider == "openai":
            return OpenAI(model=model_name, api_key=api_key or os.getenv("OPENAI_API_KEY"))

        if provider == "anthropic":
            return Anthropic(model=model_name, api_key=api_key or os.getenv("ANTHROPIC_API_KEY"))

        if provider == "cohere":
            from llama_index.llms.cohere import Cohere
            return Cohere(model=model_name, api_key=api_key or os.getenv("COHERE_API_KEY"))

        if provider == "huggingface":
            from llama_index.llms.huggingface import HuggingFaceLLM
            return HuggingFaceLLM(model_name=model_name, tokenizer_name=model_name)

        if provider == "llama":
            # For llama.cpp, model_name is interpreted as a local model path.
            from llama_index.llms.llama_cpp import LlamaCPP
            return LlamaCPP(model_path=model_name)

        raise ValueError(f"Unsupported model provider: {model_provider}. "
                         f"Supported providers are: openai, anthropic, cohere, huggingface, llama")

    @staticmethod
    def _get_default_system_prompt() -> str:
        """Return the default system prompt for GAIA benchmark tasks."""
        return """
        You are the lead coordinator for a team of specialized AI agents tackling the GAIA benchmark. Your job is to analyze each question with extreme precision, determine the exact format required for the answer, break the task into logical steps, and either solve it yourself or delegate to the appropriate specialized agents.

        ## QUESTION ANALYSIS PROCESS
        1. First, carefully read and parse the entire question
        2. Identify the EXACT output format required (single word, name, number, comma-separated list, etc.)
        3. Note any special formatting requirements (alphabetical order, specific notation, etc.)
        4. Identify what type of task this is (research, audio analysis, video analysis, code execution, data analysis, etc.)
        5. Break the question into sequential steps

        ## DELEGATION GUIDELINES
        - video_analyst: Use for all YouTube video analysis, visual content identification, or scene description
        - audio_analyst: Use for transcribing audio files, identifying speakers, or extracting information from recordings
        - researcher: Use for factual queries, literature searches, finding specific information in papers or websites
        - code_analyst: Use for executing, debugging or analyzing code snippets
        - excel_analyst: Use for analyzing spreadsheets, calculating values, or extracting data from Excel files

        ## CRITICAL RESPONSE RULES
        - NEVER include explanations in your final answer
        - NEVER include phrases like "the answer is" or "the result is"
        - Return EXACTLY what was asked for - no more, no less
        - If asked for a name, return ONLY the name
        - If asked for a number, return ONLY the number
        - If asked for a list, format it EXACTLY as specified (comma-separated, alphabetical, etc.)
        - Double-check your answer against the exact output requirements before submitting

        ## EXAMPLES OF PROPER RESPONSES:
        Question: "What is the first name of the scientist who discovered penicillin?"
        Correct answer: Alexander

        Question: "List the prime numbers between 10 and 20 in ascending order."
        Correct answer: 11, 13, 17, 19

        Question: "If you understand this sentence, write the opposite of the word 'right' as the answer."
        Correct answer: left

        Question: "How many at bats did the Yankee with the most walks in the 1977 regular season have that same season?"
        Correct answer: 572

        For questions with reverse text:
        1. Use your reverse_text_tool to process the text
        2. Understand the instruction in the reversed text
        3. Follow the instruction exactly

        After you have the final answer, verify one last time that it meets ALL formatting requirements from the question before submitting.

        IMPORTANT: Your value is in providing PRECISELY what was asked for - not in showing your work or explaining how you got there.
        """
 
 
 
 
 
 
 
 
 
 
app.py CHANGED
@@ -3,7 +3,7 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
6
- from agents.llama_index_agent import main_agent
7
  import asyncio
8
  # (Keep Constants as is)
9
  # --- Constants ---
@@ -11,17 +11,45 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
11
 
12
  # --- Basic Agent Definition ---
13
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
 
 
 
 
 
 
 
 
14
  class BasicAgent:
15
- def __init__(self):
16
- print("BasicAgent initialized.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  def __call__(self, question: str) -> str:
 
18
  print(f"Agent received question (first 50 chars): {question[:50]}...")
 
19
  async def agentic_main():
20
- response = await main_agent.run(question)
21
  return response
 
22
  response = asyncio.run(agentic_main())
23
- print(f"Agent returning answer: {response.response.blocks[-1].text}")
24
- return response.response.blocks[-1].text
 
25
 
26
  def run_and_submit_all( profile: gr.OAuthProfile | None):
27
  """
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from agents.llama_index_agent import GaiaAgent
7
  import asyncio
8
  # (Keep Constants as is)
9
  # --- Constants ---
 
11
 
12
  # --- Basic Agent Definition ---
13
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
14
+
15
# Preset model configurations that can be splatted into GaiaAgent(**PRESET).
CLAUDE = dict(
    model_provider="anthropic",
    model_name="claude-3-7-sonnet-20250219",
)
OPENAI = dict(
    model_provider="openai",
    model_name="gpt-4o",
)
23
class BasicAgent:
    """Synchronous wrapper around GaiaAgent for the Gradio submission runner."""

    def __init__(
        self,
        model_provider="openai",
        model_name="gpt-4o",
        api_key=None
    ):
        """
        Initialize the BasicAgent with configurable model settings.

        Args:
            model_provider: LLM provider to use (openai, anthropic, etc.)
            model_name: Specific model to use
            api_key: Optional API key (defaults to environment variable)
        """
        # Bug fix: honor the constructor arguments.  The previous version
        # always built GaiaAgent(**CLAUDE), silently ignoring the caller's
        # provider/model/api_key choice while printing a message that claimed
        # the requested model was in use.
        self.agent = GaiaAgent(
            model_provider=model_provider,
            model_name=model_name,
            api_key=api_key,
        )
        print(f"BasicAgent initialized with {model_provider} {model_name}.")

    def __call__(self, question: str) -> str:
        """Process a GAIA benchmark question and return the formatted answer."""
        print(f"Agent received question (first 50 chars): {question[:50]}...")

        async def agentic_main():
            response = await self.agent.run(question)
            return response

        # The agent workflow is async; run it to completion on a fresh loop.
        response = asyncio.run(agentic_main())
        # The final formatted answer is the text of the last response block.
        final_answer = response.response.blocks[-1].text
        print(f"Agent returning answer: {final_answer}")
        return final_answer
53
 
54
  def run_and_submit_all( profile: gr.OAuthProfile | None):
55
  """