Create app.py
app.py ADDED
@@ -0,0 +1,314 @@
import os
import pandas as pd
from fastapi import FastAPI, HTTPException, Body
from pydantic import BaseModel, Field
from typing import List, Dict, Any
from datasets import load_dataset, Dataset, DatasetDict
from huggingface_hub import HfApi, hf_hub_download
from datetime import datetime, timezone
import logging
import uvicorn  # To run the app

# --- Configuration ---
HF_DATASET_ID = "agents-course/unit4-students-scores"
# Ensure you have write access to this dataset repository on Hugging Face
# and are logged in via `huggingface-cli login` or have the HF_TOKEN env var set.

# --- Logging Setup ---
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# --- Load and Prepare Filtered Questions ---
# Placeholder: replace this with your actual filtered data loading logic.
# The data MUST contain 'task_id', 'Question', and 'Final answer'.
# Example structure:
# filtered_data = [
#     {'task_id': 'e1fc63a2-da7a-432f-be78-7c4a95598703', 'Question': 'If Eliud Kipchoge...', 'Final answer': '17', ... other keys ...},
#     {'task_id': 'example_pass', 'Question': 'Another question', 'Final answer': '42', ... other keys ...},
#     # ... more filtered questions
# ]

# Simulated filtered data (replace with your actual loading). If you already
# have the 'filtered_questions' list from the previous step's code, uncomment
# the assignment below the list instead of using this example data.
filtered_data = [
    {'task_id': 'q1', 'Question': 'What is 2+2?', 'Level': '1', 'Final answer': '4', 'Annotator Metadata': {'Number of steps': '1', 'Number of tools': '1'}},
    {'task_id': 'q2', 'Question': 'Capital of France?', 'Level': '1', 'Final answer': 'Paris', 'Annotator Metadata': {'Number of steps': '1', 'Number of tools': '1'}},
    {'task_id': 'q3', 'Question': '10 / 2 ?', 'Level': '1', 'Final answer': '5', 'Annotator Metadata': {'Number of steps': '1', 'Number of tools': '1'}}
]
# filtered_data = filtered_questions  # Uncomment this if you have the list from the previous step

# Prepare data structures for the API
questions_for_api: List[Dict[str, str]] = []
ground_truth_answers: Dict[str, str] = {}

for item in filtered_data:
    task_id = item.get('task_id')
    question_text = item.get('Question')
    final_answer = item.get('Final answer')

    if task_id and question_text and final_answer is not None:
        questions_for_api.append({
            "task_id": str(task_id),  # Ensure ID is a string
            "question": question_text
        })
        ground_truth_answers[str(task_id)] = str(final_answer)  # Ensure answer is a string
    else:
        logger.warning(f"Skipping item due to missing fields: {item}")

logger.info(f"Loaded {len(questions_for_api)} questions for the API.")
if not questions_for_api:
    logger.error("No valid questions loaded. API will not function correctly.")
    # You might want to exit or raise an error here, depending on requirements.
# --- Pydantic Models for Data Validation ---
class Question(BaseModel):
    task_id: str
    question: str

class AnswerItem(BaseModel):
    task_id: str
    submitted_answer: str = Field(..., description="The agent's answer for the task_id")

class Submission(BaseModel):
    username: str = Field(..., description="Hugging Face username", min_length=1)
    agent_code: str = Field(..., description="The Python class code for the agent", min_length=10)  # Basic check
    answers: List[AnswerItem] = Field(..., description="List of answers submitted by the agent")

class ScoreResponse(BaseModel):
    username: str
    score: float
    correct_count: int
    total_attempted: int
    message: str
    timestamp: str

class ErrorResponse(BaseModel):
    detail: str

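# For reference, an illustrative /submit request body matching the models above
# (the username and answers are made-up placeholders, not real data):
#
# {
#     "username": "your-hf-username",
#     "agent_code": "class MyAgent:\n    ...",
#     "answers": [
#         {"task_id": "q1", "submitted_answer": "4"},
#         {"task_id": "q2", "submitted_answer": "Paris"}
#     ]
# }
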
# --- FastAPI Application ---
app = FastAPI(
    title="Agent Evaluation API",
    description="API to fetch questions and submit agent answers for scoring.",
)

# --- Helper Function to interact with HF Dataset ---
def update_huggingface_dataset(username: str, score: float):
    """Loads the dataset, updates the score if higher, and pushes it back."""
    try:
        # 1. Load the dataset
        logger.info(f"Loading dataset '{HF_DATASET_ID}'...")
        # Try loading; handle the case where the dataset might be empty or not exist yet.
        try:
            # Use hf_hub_download to check that the parquet file exists, avoiding a full
            # dataset load error if the repo is empty. This assumes the dataset uses the
            # default 'train' split and parquet format; adjust if needed.
            hf_hub_download(repo_id=HF_DATASET_ID, filename="data/train-00000-of-00001.parquet", repo_type="dataset")
            ds = load_dataset(HF_DATASET_ID)
            logger.info("Dataset loaded successfully.")
            # Check for a 'train' split, the common default
            if "train" not in ds:
                logger.warning(f"Dataset '{HF_DATASET_ID}' does not contain a 'train' split. Creating one.")
                # Create an empty DataFrame with the correct schema if the 'train' split is missing
                df = pd.DataFrame({'username': pd.Series(dtype='str'),
                                   'score': pd.Series(dtype='float'),
                                   'timestamp': pd.Series(dtype='str')})
                ds = DatasetDict({'train': Dataset.from_pandas(df)})
            else:
                # Convert the 'train' split to a pandas DataFrame for easier manipulation
                df = ds['train'].to_pandas()

        except Exception as load_error:  # Broad catch for file-not-found or other loading issues
            logger.warning(f"Could not load dataset '{HF_DATASET_ID}' or it might be empty/new ({load_error}). Creating structure.")
            # Create an empty DataFrame with the correct schema
            df = pd.DataFrame({'username': pd.Series(dtype='str'),
                               'score': pd.Series(dtype='float'),
                               'timestamp': pd.Series(dtype='str')})

        # Ensure the expected columns exist; add any that are missing
        for col, dtype in [('username', 'str'), ('score', 'float'), ('timestamp', 'str')]:
            if col not in df.columns:
                logger.warning(f"Column '{col}' not found in dataset. Adding it.")
                df[col] = pd.Series(dtype=dtype)

        # Convert the score column to numeric, coercing errors to NaN
        df['score'] = pd.to_numeric(df['score'], errors='coerce')

        # 2. Find the existing score for the user
        existing_entries = df[df['username'] == username]
        current_timestamp = datetime.now(timezone.utc).isoformat()
        needs_update = False

        if not existing_entries.empty:
            # User exists: find their highest score, handling potential NaN
            # values from coercion or previous bad data
            max_existing_score = existing_entries['score'].max()
            if pd.isna(max_existing_score) or score > max_existing_score:
                logger.info(f"New score {score} is higher than existing max {max_existing_score} for {username}. Updating.")
                # Remove old entries for this user
                df = df[df['username'] != username]
                # Add the new entry
                new_entry = pd.DataFrame([{'username': username, 'score': score, 'timestamp': current_timestamp}])
                df = pd.concat([df, new_entry], ignore_index=True)
                needs_update = True
            else:
                logger.info(f"New score {score} is not higher than existing max {max_existing_score} for {username}. No update needed.")
        else:
            # User does not exist: add them
            logger.info(f"User {username} not found. Adding new entry.")
            new_entry = pd.DataFrame([{'username': username, 'score': score, 'timestamp': current_timestamp}])
            df = pd.concat([df, new_entry], ignore_index=True)
            needs_update = True

        # 3. Push the updated data back to the Hugging Face Hub if changes were made
        if needs_update:
            logger.info(f"Pushing updated dataset to '{HF_DATASET_ID}'...")
            # Convert the modified DataFrame back to a Dataset under the 'train' split.
            # preserve_index=False keeps the pandas index out of the pushed schema.
            updated_ds = DatasetDict({'train': Dataset.from_pandas(df, preserve_index=False)})
            updated_ds.push_to_hub(HF_DATASET_ID)  # Token is picked up from the env or login
            logger.info("Dataset push successful.")
            return True
        else:
            return False  # No update was pushed

    except Exception as e:
        logger.error(f"Error interacting with Hugging Face dataset '{HF_DATASET_ID}': {e}", exc_info=True)
        # Re-raise as an HTTPException to be caught by the endpoint handler
        raise HTTPException(status_code=500, detail=f"Failed to update Hugging Face dataset: {e}")

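# For reference, the leaderboard dataset maintained by the helper above ends up
# with one row per user; illustrative contents (made up, not real data):
#
#   username    score    timestamp
#   "alice"     66.67    "2025-01-01T12:00:00+00:00"
#   "bob"       100.0    "2025-01-02T08:30:00+00:00"
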
# --- API Endpoints ---

@app.get("/questions",
         response_model=List[Question],
         summary="Get Filtered Questions",
         description="Returns a list of questions (task_id and question text only) for the agent evaluation.")
async def get_questions():
    """
    Provides the list of questions that agents should answer.
    """
    if not questions_for_api:
        raise HTTPException(status_code=404, detail="No questions available.")
    return questions_for_api


@app.post("/submit",
          response_model=ScoreResponse,
          summary="Submit Agent Answers",
          description="Submit answers from an agent, calculate the score, and update the leaderboard on Hugging Face.",
          responses={
              200: {"description": "Submission successful, score calculated."},
              400: {"model": ErrorResponse, "description": "Invalid input data."},
              404: {"model": ErrorResponse, "description": "Task ID not found."},
              500: {"model": ErrorResponse, "description": "Server error (e.g., failed to update dataset)."}
          })
async def submit_answers(submission: Submission = Body(...)):
    """
    Receives agent submissions:
    - Validates input.
    - Checks presence of agent code (basic anti-cheat).
    - Calculates the score by comparing submitted answers against the ground truth.
    - Updates the score on the Hugging Face dataset if it is a new high score for the user.
    """
    logger.info(f"Received submission from username: {submission.username}")

    # Basic check for agent code presence
    if not submission.agent_code or len(submission.agent_code.strip()) < 10:
        logger.warning(f"Submission rejected for {submission.username}: Agent code missing or too short.")
        raise HTTPException(status_code=400, detail="Agent code is required and must be sufficiently long.")

    if not submission.answers:
        logger.warning(f"Submission rejected for {submission.username}: No answers provided.")
        raise HTTPException(status_code=400, detail="No answers provided in the submission.")

    correct_count = 0
    total_attempted = len(submission.answers)
    processed_ids = set()

    for answer_item in submission.answers:
        task_id = str(answer_item.task_id)  # Ensure string comparison
        submitted = str(answer_item.submitted_answer)  # Ensure string comparison

        # Prevent duplicate task_id submissions in the same request
        if task_id in processed_ids:
            logger.warning(f"Duplicate task_id '{task_id}' in submission from {submission.username}. Skipping.")
            total_attempted -= 1  # Adjust the count as we skip it
            continue
        processed_ids.add(task_id)

        # Check that the task_id is valid
        if task_id not in ground_truth_answers:
            logger.warning(f"Task ID '{task_id}' submitted by {submission.username} not found in ground truth list.")
            # Option 1: Reject the whole submission
            # raise HTTPException(status_code=404, detail=f"Task ID '{task_id}' not found.")
            # Option 2: Skip this answer and continue scoring the others (chosen here)
            total_attempted -= 1  # Don't count the attempt if the ID was invalid
            continue

        # Compare answers (case-insensitive, whitespace-stripped)
        ground_truth = ground_truth_answers[task_id]
        if submitted.strip().lower() == ground_truth.strip().lower():
            correct_count += 1
            logger.debug(f"Correct answer for {task_id} from {submission.username}")
        else:
            logger.debug(f"Incorrect answer for {task_id} from {submission.username}. Submitted: '{submitted}', Expected: '{ground_truth}'")

    # Calculate the score
    if total_attempted == 0:
        score = 0.0
        message = "No valid answers submitted or processed."
        logger.warning(f"No valid answers processed for {submission.username}.")
    else:
        score = round((correct_count / total_attempted) * 100, 2)
        message = f"Score calculated successfully. {correct_count}/{total_attempted} correct."
        logger.info(f"Score for {submission.username}: {score}% ({correct_count}/{total_attempted})")

    # Update the Hugging Face dataset
    try:
        updated = update_huggingface_dataset(submission.username, score)
        if updated:
            message += " High score updated on leaderboard."
            logger.info(f"Leaderboard updated for {submission.username}.")
        else:
            message += " Score did not improve the previous record; leaderboard not updated."
            logger.info(f"Leaderboard not updated for {submission.username} as the score was not higher.")
    except HTTPException as http_exc:
        # Propagate HTTPExceptions from the helper function (e.g., 500 errors)
        raise http_exc
    except Exception as e:
        # Catch any other unexpected errors during the HF update
        logger.error(f"Unexpected error during dataset update for {submission.username}: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail="An unexpected error occurred while updating the leaderboard.")

    return ScoreResponse(
        username=submission.username,
        score=score,
        correct_count=correct_count,
        total_attempted=total_attempted,
        message=message,
        timestamp=datetime.now(timezone.utc).isoformat()
    )


# --- Run the application ---
# This block is mainly for local development without Docker;
# Docker uses the CMD instruction in the Dockerfile instead.
if __name__ == "__main__":
    logger.info("Starting FastAPI server for local development...")
    if not questions_for_api:
        logger.error("EXITING: Cannot start server without loaded questions.")
    else:
        # Read the port from the environment for consistency; default to 8000 locally
        local_port = int(os.getenv("PORT", "8000"))
        logger.info(f"Running Uvicorn locally on port: {local_port}")
        # Note: host='127.0.0.1' is usually fine for local runs outside Docker
        uvicorn.run(app, host="127.0.0.1", port=local_port, log_level="info")
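
# --- Example client usage (illustrative sketch, not part of the app) ---
# Assuming the server is running locally on port 8000, an agent script could
# exercise both endpoints roughly like this with the `requests` library
# (the submitted answers here are placeholders):
#
# import requests
#
# BASE_URL = "http://127.0.0.1:8000"
# questions = requests.get(f"{BASE_URL}/questions").json()
# payload = {
#     "username": "your-hf-username",
#     "agent_code": "class MyAgent:\n    ...",
#     "answers": [
#         {"task_id": q["task_id"], "submitted_answer": "placeholder"}
#         for q in questions
#     ],
# }
# result = requests.post(f"{BASE_URL}/submit", json=payload).json()
# print(result)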