import requests
import inspect
import os
import re
import spacy
from transformers import pipeline
from duckduckgo_search import DDGS
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import whisper
from moviepy.editor import VideoFileClip  # moviepy < 2.0; moviepy 2.x exposes VideoFileClip at the package top level
import gradio as gr
import pandas as pd
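# BasicAgent answers the evaluation questions with a small toolchain: Whisper transcribes any
# attached video, DuckDuckGo search supplies web context, a transformers question-answering
# pipeline extracts an answer, and NER / spaCy post-process "who", "how many", "what" and
# "where" style questions.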
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")
        self.whisper_model = whisper.load_model("base")  # speech-to-text for video questions
        self.qa_pipeline = pipeline("question-answering")  # extractive QA over search context
        self.ner_pipeline = pipeline("ner", aggregation_strategy="simple")  # named-entity recognition
        self.embedding_model = pipeline("feature-extraction")  # sentence/token embeddings
        self.spacy = spacy.load("en_core_web_sm")  # POS tagging for keyword extraction
def extract_named_entities(self, text):
entities = self.ner_pipeline(text)
return [e["word"] for e in entities if e["entity_group"] == "PER"]
def extract_numbers(self, text):
return re.findall(r"\d+", text)
def extract_keywords(self, text):
doc = self.spacy(text)
return [token.text for token in doc if token.pos_ in ["NOUN", "PROPN"]]
    def call_whisper(self, video_path: str) -> str:
        # Extract the audio track to a temporary WAV file, then transcribe it with Whisper.
        video = VideoFileClip(video_path)
        audio_path = "temp_audio.wav"
        video.audio.write_audiofile(audio_path)
        video.close()  # release the file handle before transcription
        result = self.whisper_model.transcribe(audio_path)
        return result["text"]
def search(self, question: str) -> str:
try:
with DDGS() as ddgs:
results = list(ddgs.text(question, max_results=3))
if not results:
return "No relevant search results found."
context = results[0]["body"]
return context
except Exception as e:
return f"Search error: {e}"
def answer_question(self, question: str, context: str) -> str:
try:
return self.qa_pipeline(question=question, context=context)["answer"]
        except Exception:
            return context  # Fall back to the raw search context if QA fails
def __call__(self, question: str, video_path: str = None) -> str:
print(f"Agent received question: {question[:60]}...")
if video_path:
transcription = self.call_whisper(video_path)
print(f"Transcribed video: {transcription[:100]}...")
return transcription
context = self.search(question)
answer = self.answer_question(question, context)
q_lower = question.lower()
# Enhance based on question type
if "who" in q_lower:
people = self.extract_named_entities(context)
return f"👤 Who: {', '.join(people) if people else 'No person found'}\n\n🧠 Answer: {answer}"
elif "how many" in q_lower:
numbers = self.extract_numbers(context)
return f"🔢 How many: {', '.join(numbers) if numbers else 'No numbers found'}\n\n🧠 Answer: {answer}"
elif "how" in q_lower:
return f"⚙️ How: {answer}"
elif "what" in q_lower or "where" in q_lower:
keywords = self.extract_keywords(context)
return f"🗝️ Keywords: {', '.join(keywords[:5])}\n\n🧠 Answer: {answer}"
else:
return f"🧠 Answer: {answer}"
# --- Submission Function ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
def run_and_submit_all(profile: gr.OAuthProfile | None):
space_id = os.getenv("SPACE_ID")
if profile:
username = profile.username
print(f"User logged in: {username}")
else:
return "Please Login to Hugging Face with the button.", None
api_url = DEFAULT_API_URL
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"
try:
agent = BasicAgent()
except Exception as e:
return f"Error initializing agent: {e}", None
agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
print(f"Agent repo: {agent_code}")
try:
response = requests.get(questions_url, timeout=15)
response.raise_for_status()
questions_data = response.json()
print(f"Fetched {len(questions_data)} questions.")
except Exception as e:
return f"Error fetching questions: {e}", None
results_log = []
answers_payload = []
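    # One payload entry per question: {"task_id": ..., "submitted_answer": ...}.
    # Items that ship a video_link are answered by transcription; the rest go through
    # web search plus extractive QA (see BasicAgent.__call__).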
for item in questions_data:
task_id = item.get("task_id")
question_text = item.get("question")
video_link = item.get("video_link")
if not task_id or question_text is None:
continue
try:
submitted_answer = agent(question_text, video_path=video_link)
answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
except Exception as e:
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"ERROR: {e}"})
if not answers_payload:
return "No answers were submitted.", pd.DataFrame(results_log)
submission_data = {
"username": username.strip(),
"agent_code": agent_code,
"answers": answers_payload
}
try:
response = requests.post(submit_url, json=submission_data, timeout=60)
response.raise_for_status()
result_data = response.json()
final_status = (
f"✅ Submission Successful!\n"
f"User: {result_data.get('username')}\n"
f"Score: {result_data.get('score', 'N/A')}% "
f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')})\n"
f"Message: {result_data.get('message', '')}"
)
return final_status, pd.DataFrame(results_log)
except Exception as e:
return f"Submission Failed: {e}", pd.DataFrame(results_log)
# --- Gradio Interface ---
with gr.Blocks() as demo:
gr.Markdown("# Basic Agent Evaluation Runner")
gr.Markdown(
"""
**Instructions:**
1. Clone this space and modify the agent logic if desired.
2. Log in to Hugging Face with the button below.
3. Click 'Run Evaluation & Submit All Answers' to evaluate and submit your agent.
---
**Note:** This process may take several minutes depending on the number of questions.
"""
)
gr.LoginButton()
run_button = gr.Button("Run Evaluation & Submit All Answers")
status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
run_button.click(
fn=run_and_submit_all,
outputs=[status_output, results_table]
)
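    # Note: run_and_submit_all declares a gr.OAuthProfile parameter; Gradio injects the
    # logged-in profile automatically when gr.LoginButton is present, so no `inputs=` is wired here.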
if __name__ == "__main__":
print("-" * 30 + " App Starting " + "-" * 30)
space_host = os.getenv("SPACE_HOST")
space_id = os.getenv("SPACE_ID")
if space_host:
print(f"✅ SPACE_HOST: {space_host}")
print(f" → https://{space_host}.hf.space")
else:
print("ℹ️ No SPACE_HOST set.")
if space_id:
print(f"✅ SPACE_ID: {space_id}")
print(f" → https://huggingface.co/spaces/{space_id}/tree/main")
else:
print("ℹ️ No SPACE_ID set.")
demo.launch(debug=True, share=False)