jjjulllesss commited on
Commit
6de65a0
·
1 Parent(s): 81917a3

Agent added

Browse files
Files changed (3) hide show
  1. agent.py +198 -0
  2. app.py +22 -24
  3. requirements.txt +6 -1
agent.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import mimetypes
import os

import pandas as pd
import requests
from huggingface_hub import InferenceClient
from smolagents import CodeAgent, LiteLLMModel, DuckDuckGoSearchTool, WikipediaSearchTool, Tool, VisitWebpageTool
+
8
class getFile(Tool):
    """Tool that downloads the file attached to a GAIA task and saves it locally.

    Returns the absolute path of the saved file so downstream tools can open
    it regardless of the current working directory.
    """

    name = "getFileTool"
    description = "This tool allows to download the file attached to the question"
    output_type = "string"
    inputs = {
        "task_id": {
            "type": "string",
            "description": "The task id of the question",
        },
        "file_name": {
            "type": "string",
            "description": "The name of the file to download"
        }
    }

    def forward(self, task_id: str, file_name: str) -> str:
        # The scoring space serves each task's attachment at a stable URL.
        url = f"https://agents-course-unit4-scoring.hf.space/files/{task_id}"
        # Fail fast on HTTP errors instead of silently writing an error page
        # to disk, and bound the request so a stalled server cannot hang the
        # agent run indefinitely.
        response = requests.get(url, timeout=60)
        response.raise_for_status()
        # Save the payload under the requested file name.
        with open(file_name, "wb") as f:
            f.write(response.content)
        # Return the absolute path of the downloaded file.
        return os.path.abspath(file_name)
32
+
33
class LoadXlsxFileTool(Tool):
    """Tool that reads an Excel workbook from disk into a pandas DataFrame."""

    name = "load_xlsx_file_tool"
    description = """This tool loads xlsx file into pandas and returns it"""
    inputs = {
        "file_path": {"type": "string", "description": "File path"}
    }
    output_type = "object"

    def forward(self, file_path: str) -> object:
        # Delegate parsing entirely to pandas; the DataFrame is handed back
        # unchanged so the calling agent can inspect or query it directly.
        frame = pd.read_excel(file_path)
        return frame
43
+
44
class LoadTextFileTool(Tool):
    """Tool that reads a text file from disk (decoded as UTF-8) and returns its contents."""

    name = "load_text_file_tool"
    description = """This tool loads any text file"""
    inputs = {
        "file_path": {"type": "string", "description": "File path"}
    }
    output_type = "string"

    def forward(self, file_path: str) -> str:
        # Annotation corrected to `str` to match output_type ("string").
        # NOTE(review): assumes the file is UTF-8 encoded; a file in another
        # encoding will raise UnicodeDecodeError.
        with open(file_path, 'r', encoding='utf-8') as file:
            return file.read()
55
+
56
class AudioToTextTool(Tool):
    """Tool that transcribes an audio file to text via the HF Whisper inference API.

    On any failure (missing file, HTTP error, unexpected payload) it returns
    an error *string* rather than raising, so the agent can react to it.
    """

    name = "audio_to_text_tool"
    description = """This tool transcribes audio files into text"""
    inputs = {
        "file_path": {"type": "string", "description": "File path"}
    }
    output_type = "string"

    def forward(self, file_path: str) -> str:
        try:
            # Check if file exists before attempting to read it.
            if not os.path.exists(file_path):
                return f"Error: File {file_path} does not exist"

            # Read the audio file as raw bytes.
            with open(file_path, "rb") as f:
                audio_data = f.read()

            # Derive the real content type from the file extension instead of
            # hard-coding MP3; fall back to audio/mpeg when it is unknown or
            # not an audio type.
            content_type, _ = mimetypes.guess_type(file_path)
            if not content_type or not content_type.startswith("audio/"):
                content_type = "audio/mpeg"

            # Set up the API URL and headers.
            api_url = "https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3-turbo"
            headers = {
                "Authorization": f"Bearer {os.getenv('HF_API_KEY')}",
                "Content-Type": content_type,
            }

            # Make the API request; bound it so a stalled server cannot hang
            # the agent run indefinitely.
            response = requests.post(api_url, headers=headers, data=audio_data, timeout=300)
            response.raise_for_status()  # Raise an exception for bad status codes

            # Parse the JSON payload and return only the transcription text.
            output = response.json()
            return output["text"]

        except Exception as e:
            # Deliberate broad catch: tools report failures as strings so the
            # agent loop keeps running.
            return f"Error transcribing audio: {str(e)}"
91
+
92
class ImageAnalysisTool(Tool):
    """Tool that asks a vision-capable LLM to describe the image attached to a task."""

    name = "image_analysis_tool"
    description = """This tool analyzes images and returns the text and the information in the image"""
    inputs = {
        "task_id": {"type": "string", "description": "The task id of the question"},
    }
    output_type = "string"

    def forward(self, task_id: str) -> str:
        # The scoring space serves each task's attachment at a stable URL, so
        # the image never needs to be downloaded locally for this call.
        image_url = f"https://agents-course-unit4-scoring.hf.space/files/{task_id}"

        client = InferenceClient(
            provider="nebius",
            api_key=os.getenv("HF_API_KEY"),
        )

        # Single multimodal user turn: an instruction plus the image reference.
        user_message = {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Describe this image in markdown format"
                },
                {
                    "type": "image_url",
                    "image_url": {"url": image_url},
                },
            ],
        }

        completion = client.chat.completions.create(
            model="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
            messages=[user_message],
        )

        # Return only the assistant's text, not the full response object.
        return completion.choices[0].message.content
128
+
129
+
130
def final_answer_formatting(answer, question):
    """Reformat a raw agent answer into the exact format the question requests.

    Args:
        answer: The raw answer produced by the manager agent.
        question: The original benchmark question text.

    Returns:
        The reformatting model's response content (a string).
    """
    prompt = f"""
    You are an AI assistant specialized in the GAIA benchmark. For the question provided, generate the answer in the exact format requested by the question. Do not include any other text or creative additions.

    Question: {question}
    Answer: {answer}
    """

    # A lightweight model is enough for pure reformatting work.
    formatter = LiteLLMModel(
        model_id="gemini/gemini-2.0-flash",
        api_key=os.getenv("GOOGLE_API_KEY"),
    )

    chat = [
        {"role": "user", "content": [{"type": "text", "text": prompt}]}
    ]

    return formatter(chat).content
149
+
150
+
151
# Sub-agent dedicated to web research: it can search Wikipedia and the web
# (DuckDuckGo), then visit pages to extract their content. Managed by
# manager_agent below.
web_agent = CodeAgent(
    model=LiteLLMModel(
        model_id="gemini/gemini-2.0-flash",
        api_key=os.getenv("GOOGLE_API_KEY"),
    ),
    tools=[
        WikipediaSearchTool(),
        DuckDuckGoSearchTool(),
        VisitWebpageTool()
    ],
    add_base_tools=False,
    # Modules the generated code is allowed to import while browsing/parsing.
    additional_authorized_imports=[
        "os", "requests", "inspect", "pandas",
        "datetime", "re", "bs4", "markdownify"
    ],
    max_steps=10,
    name="web_agent",
    description="This agent is used to search the web for information"
)
170
+
171
# Sub-agent dedicated to audio: wraps the Whisper transcription tool so the
# manager can delegate any audio-file question to it.
audio_agent = CodeAgent(
    model=LiteLLMModel(
        model_id="gemini/gemini-2.0-flash",
        api_key=os.getenv("GOOGLE_API_KEY"),
    ),
    tools=[AudioToTextTool()],
    add_base_tools=False,
    max_steps=10,
    name="audio_agent",
    description="This agent is used to analyze and transcribe audio files"
)
182
+
183
+
184
# Top-level orchestrator: holds the file/spreadsheet/text/image tools itself
# and delegates web search and audio transcription to the managed sub-agents.
# This is the object app.py imports and calls per question.
manager_agent = CodeAgent(
    name="manager_agent",
    # A stronger model than the sub-agents', since it plans and delegates.
    model=LiteLLMModel(
        model_id="gemini/gemini-2.5-flash-preview-04-17",
        api_key=os.getenv("GOOGLE_API_KEY"),
    ),
    tools=[getFile(), LoadXlsxFileTool(), LoadTextFileTool(), ImageAnalysisTool()],
    managed_agents=[web_agent, audio_agent],
    additional_authorized_imports=[
        "pandas"
    ],
    # Re-plan every 5 steps to keep long tasks on track.
    planning_interval=5,
    verbosity_level=1,
    max_steps=10,
)
app.py CHANGED
@@ -3,25 +3,15 @@ import gradio as gr
3
  import requests
4
  import inspect
5
  import pandas as pd
 
6
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
- # --- Basic Agent Definition ---
12
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
13
- class BasicAgent:
14
- def __init__(self):
15
- print("BasicAgent initialized.")
16
- def __call__(self, question: str) -> str:
17
- print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
21
-
22
- def run_and_submit_all( profile: gr.OAuthProfile | None):
23
  """
24
- Fetches all questions, runs the BasicAgent on them, submits all answers,
25
  and displays the results.
26
  """
27
  # --- Determine HF Space Runtime URL and Repo URL ---
@@ -38,12 +28,13 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
38
  questions_url = f"{api_url}/questions"
39
  submit_url = f"{api_url}/submit"
40
 
41
- # 1. Instantiate Agent ( modify this part to create your agent)
42
  try:
43
- agent = BasicAgent()
44
  except Exception as e:
45
  print(f"Error instantiating agent: {e}")
46
  return f"Error initializing agent: {e}", None
 
47
  # In the case of an app running as a Hugging Face space, this link points toward your codebase (useful for others, so please keep it public)
48
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
49
  print(agent_code)
@@ -76,13 +67,22 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
76
  for item in questions_data:
77
  task_id = item.get("task_id")
78
  question_text = item.get("question")
 
79
  if not task_id or question_text is None:
80
  print(f"Skipping item with missing task_id or question: {item}")
81
  continue
82
  try:
83
- submitted_answer = agent(question_text)
84
- answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
85
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
 
 
 
 
 
 
 
 
86
  except Exception as e:
87
  print(f"Error running agent on task {task_id}: {e}")
88
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
@@ -142,14 +142,13 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
142
 
143
  # --- Build Gradio Interface using Blocks ---
144
  with gr.Blocks() as demo:
145
- gr.Markdown("# Basic Agent Evaluation Runner")
146
  gr.Markdown(
147
  """
148
  **Instructions:**
149
 
150
- 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
151
- 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
152
- 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
153
 
154
  ---
155
  **Disclaimers:**
@@ -163,7 +162,6 @@ with gr.Blocks() as demo:
163
  run_button = gr.Button("Run Evaluation & Submit All Answers")
164
 
165
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
166
- # Removed max_rows=10 from DataFrame constructor
167
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
168
 
169
  run_button.click(
@@ -192,5 +190,5 @@ if __name__ == "__main__":
192
 
193
  print("-"*(60 + len(" App Starting ")) + "\n")
194
 
195
- print("Launching Gradio Interface for Basic Agent Evaluation...")
196
  demo.launch(debug=True, share=False)
 
3
  import requests
4
  import inspect
5
  import pandas as pd
6
+ from agent import manager_agent, final_answer_formatting
7
 
8
  # (Keep Constants as is)
9
  # --- Constants ---
10
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
11
 
12
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
 
 
 
 
 
 
 
 
 
 
 
13
  """
14
+ Fetches all questions, runs the manager_agent on them, submits all answers,
15
  and displays the results.
16
  """
17
  # --- Determine HF Space Runtime URL and Repo URL ---
 
28
  questions_url = f"{api_url}/questions"
29
  submit_url = f"{api_url}/submit"
30
 
31
+ # 1. Use the manager_agent from agent.py
32
  try:
33
+ agent = manager_agent
34
  except Exception as e:
35
  print(f"Error instantiating agent: {e}")
36
  return f"Error initializing agent: {e}", None
37
+
38
  # In the case of an app running as a Hugging Face space, this link points toward your codebase (useful for others, so please keep it public)
39
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
40
  print(agent_code)
 
67
  for item in questions_data:
68
  task_id = item.get("task_id")
69
  question_text = item.get("question")
70
+ file_name = item.get("file_name")
71
  if not task_id or question_text is None:
72
  print(f"Skipping item with missing task_id or question: {item}")
73
  continue
74
  try:
75
+ # Format the input for the manager agent
76
+ agent_input = f"""
77
+ task_id: {task_id}
78
+ file_name: {file_name if file_name else None}
79
+ Question to answer: {question_text}
80
+ """
81
+ submitted_answer = agent(agent_input)
82
+ # Format the answer using the final_answer_formatting function
83
+ formatted_answer = final_answer_formatting(submitted_answer, question_text)
84
+ answers_payload.append({"task_id": task_id, "submitted_answer": formatted_answer})
85
+ results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": formatted_answer})
86
  except Exception as e:
87
  print(f"Error running agent on task {task_id}: {e}")
88
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
142
 
143
  # --- Build Gradio Interface using Blocks ---
144
  with gr.Blocks() as demo:
145
+ gr.Markdown("# GAIA Agent Evaluation Runner")
146
  gr.Markdown(
147
  """
148
  **Instructions:**
149
 
150
+ 1. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
151
+ 2. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
 
152
 
153
  ---
154
  **Disclaimers:**
 
162
  run_button = gr.Button("Run Evaluation & Submit All Answers")
163
 
164
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
 
165
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
166
 
167
  run_button.click(
 
190
 
191
  print("-"*(60 + len(" App Starting ")) + "\n")
192
 
193
+ print("Launching Gradio Interface for GAIA Agent Evaluation...")
194
  demo.launch(debug=True, share=False)
requirements.txt CHANGED
@@ -1,2 +1,7 @@
1
  gradio
2
- requests
 
 
 
 
 
 
1
  gradio
2
+ requests
3
+ pandas
4
+ openpyxl
5
+ huggingface-hub
6
+ smolagents
7
+ python-dotenv