Spaces:
Runtime error
Runtime error
File size: 3,803 Bytes
bbf17d2 5d6fe2e bbf17d2 7f9795b 72f623a 2e72f39 7f9795b 6b9c0e4 7f9795b 6b9c0e4 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b bbf17d2 7f9795b 372022a 7f9795b bbf17d2 7f9795b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 |
import os
import gradio as gr
import requests
import pandas as pd
from transformers import AutoModelForCausalLM, AutoTokenizer
# --- Constants ---
# Base URL of the Agents Course Unit 4 scoring service; run_and_submit_all
# appends /questions and /submit to this.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
# --- Basic Agent Logic ---
class BasicAgent:
    """Minimal LLM agent wrapping GPT-2.

    Builds a fixed instruction prompt, generates a continuation, and extracts
    the text after the ``FINAL ANSWER:`` marker. The returned string always
    has the form ``FINAL ANSWER: <answer>`` (``UNKNOWN`` when the model did
    not produce a usable answer).
    """

    def __init__(self):
        print("BasicAgent initialized.")
        self.llm = AutoModelForCausalLM.from_pretrained("gpt2")
        self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
        # GPT-2 ships without a pad token; reuse EOS so generate() can pad
        # without emitting a warning.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
        self.agent_prompt = (
            "You are a general AI assistant. I will ask you a question. "
            "Finish your answer with the format: FINAL ANSWER: [YOUR FINAL ANSWER]."
        )

    def __call__(self, question: str) -> str:
        """Answer *question* and return a ``FINAL ANSWER: ...`` string."""
        input_text = f"{self.agent_prompt}\n\nQuestion: {question}"
        inputs = self.tokenizer(input_text, return_tensors="pt")
        # Bug fix: without max_new_tokens, generate() falls back to the model
        # default max_length=20 — shorter than the prompt itself — so no
        # useful continuation was ever produced.
        outputs = self.llm.generate(
            **inputs,
            max_new_tokens=64,
            pad_token_id=self.tokenizer.eos_token_id,
        )
        # Bug fix: decode ONLY the newly generated tokens. The prompt itself
        # contains the literal "FINAL ANSWER:" instruction, so splitting the
        # full decoded sequence could return prompt text instead of an answer.
        prompt_len = inputs["input_ids"].shape[1]
        decoded = self.tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
        if "FINAL ANSWER:" not in decoded:
            return "FINAL ANSWER: UNKNOWN"
        final = decoded.split("FINAL ANSWER:")[-1].strip()
        return f"FINAL ANSWER: {final}" if final else "FINAL ANSWER: UNKNOWN"
# --- Submission Function ---
def run_and_submit_all(username):
    """Fetch all scoring-API questions, answer each with BasicAgent, and submit.

    Args:
        username: Hugging Face username to attribute the submission to.
            May be None or whitespace-only, in which case nothing runs.

    Returns:
        tuple: (status_message, results_table) where results_table is a
        pandas DataFrame of per-question outcomes, or None when no run
        was attempted.
    """
    space_id = os.getenv("SPACE_ID", "your-username/your-space")  # fallback for local runs
    # Robustness fix: Gradio can deliver None; original crashed on .strip().
    username = (username or "").strip()
    if not username:
        return "Username is required for submission.", None

    agent = BasicAgent()
    questions_url = f"{DEFAULT_API_URL}/questions"
    submit_url = f"{DEFAULT_API_URL}/submit"
    # Link to this Space's source tree so the scoring server records provenance.
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"

    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Failed to fetch questions: {e}", None
    # Robustness fix: a non-list payload previously crashed on item.get(...).
    if not isinstance(questions_data, list):
        return "Failed to fetch questions: unexpected response format.", None

    answers = []
    log = []
    for item in questions_data:
        task_id = item.get("task_id")
        question = item.get("question")
        if not task_id or not question:
            continue  # skip malformed entries rather than failing the batch
        try:
            answer = agent(question)
            answers.append({"task_id": task_id, "submitted_answer": answer})
            log.append({"Task ID": task_id, "Question": question, "Submitted Answer": answer})
        except Exception as e:
            # Record per-question failures without aborting the whole run.
            log.append({"Task ID": task_id, "Question": question, "Submitted Answer": f"ERROR: {e}"})

    if not answers:
        return "No answers submitted.", pd.DataFrame(log)

    payload = {
        "username": username,
        "agent_code": agent_code,
        "answers": answers,
    }
    try:
        response = requests.post(submit_url, json=payload, timeout=30)
        response.raise_for_status()
        result = response.json()
        status = (
            f"Submission Successful!\n"
            f"User: {result.get('username')}\n"
            f"Score: {result.get('score', 'N/A')}% "
            f"({result.get('correct_count', '?')}/{result.get('total_attempted', '?')} correct)\n"
            f"Message: {result.get('message', '')}"
        )
        return status, pd.DataFrame(log)
    except Exception as e:
        return f"Submission failed: {e}", pd.DataFrame(log)
# --- Gradio UI ---
# One-button interface: enter a username, run the agent over all questions,
# then show the submission status and a per-question answer log.
with gr.Blocks() as demo:
    gr.Markdown("## 🚀 Basic Agent Evaluation & Submission")
    gr.Markdown("Enter your Hugging Face username and press **Run and Submit** to evaluate your agent and submit your results.")
    username_input = gr.Textbox(label="Hugging Face Username", placeholder="e.g. your-hf-username")
    run_button = gr.Button("Run and Submit")
    # interactive=False: this box is output-only; users never type into it.
    status_output = gr.Textbox(label="Submission Status", lines=4, interactive=False)
    results_table = gr.DataFrame(label="Submitted Answers")
    # Wire the button to run_and_submit_all: one input (username),
    # two outputs (status string, log DataFrame).
    run_button.click(fn=run_and_submit_all, inputs=[username_input], outputs=[status_output, results_table])
if __name__ == "__main__":
    # Fix: removed the trailing " |" artifact that made this line a syntax
    # error. debug=True surfaces tracebacks in the Gradio UI during development.
    demo.launch(debug=True)