# NOTE(review): the original first lines read "Spaces: / Sleeping / Sleeping" —
# Hugging Face Space status-banner text captured by a page scrape, not Python.
# Preserved here as a comment so the module parses.
import os

import gradio as gr
import pandas as pd
import requests
from dotenv import load_dotenv
from huggingface_hub import login

# Load environment variables (HF_TOKEN, MODEL_NAME, MY_SPACE_ID) from a .env file.
load_dotenv()

# Base URL of the custom questions/scoring API this Space talks to.
API_BASE_URL = "https://my-custom-api.hf.space"

# Inference model id; override via the MODEL_NAME env var
# (e.g. 'google/gemma-2b-it').
MODEL_NAME = os.getenv("MODEL_NAME", "meta-llama/Meta-Llama-3-8B-Instruct")
def call_model(prompt):
    """Send *prompt* to the Hugging Face Inference API and return the generated text.

    Returns the model's ``generated_text`` on success, or a string starting
    with ``"ERROR: "`` when the API reports an error (best-effort behavior the
    callers rely on — they log these strings rather than crash).

    Raises:
        KeyError: if HF_TOKEN is not set in the environment.
        requests.RequestException: on network-level failures or timeout.
    """
    headers = {
        "Authorization": f"Bearer {os.environ['HF_TOKEN']}",
        "Content-Type": "application/json",
    }
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": 512},
    }
    response = requests.post(
        f"https://api-inference.huggingface.co/models/{MODEL_NAME}",
        headers=headers,
        json=payload,
        timeout=60,
    )
    # Robustness: a gateway error (e.g. 503) may return non-JSON; surface it
    # as an ERROR string instead of an opaque JSONDecodeError.
    try:
        result = response.json()
    except ValueError:
        return f"ERROR: HTTP {response.status_code}"
    # The API signals model-level failures as {"error": "..."} dicts.
    if isinstance(result, dict) and result.get("error"):
        return f"ERROR: {result['error']}"
    return result[0]["generated_text"] if isinstance(result, list) else result
def _fetch_questions(questions_url: str, attachments_url: str):
    """Download the question list and attach each task's file content in place.

    For every question that declares a ``file_name``, fetches
    ``{attachments_url}{task_id}`` and stores the response body under
    ``attachment_b64`` (``None`` on failure — best-effort, per original).
    Raises requests exceptions if the question list itself cannot be fetched.
    """
    response = requests.get(questions_url, timeout=15)
    response.raise_for_status()
    questions = response.json()
    for q in questions:
        task_id = q.get("task_id")
        if task_id and q.get("file_name", ""):
            try:
                att_response = requests.get(f"{attachments_url}{task_id}", timeout=15)
                att_response.raise_for_status()
                # Body is assumed to be base64 text — TODO confirm against the API.
                q["attachment_b64"] = att_response.text
            except Exception as e:
                print(f"Error al obtener adjunto: {e}")
                q["attachment_b64"] = None
    return questions


def _run_agent(questions):
    """Run call_model over each question; return (answers_payload, results_log).

    Per-question model failures are logged as "ERROR: ..." rows in the results
    table but excluded from the submission payload (original behavior).
    """
    results_log = []
    answers_payload = []
    for item in questions:
        task_id = item.get("task_id")
        question_text = item.get("question", "")
        attachment = item.get("attachment_b64", "")
        full_prompt = (
            f"{question_text}\n\nArchivo adjunto:\n{attachment}"
            if attachment
            else question_text
        )
        if not task_id or not full_prompt:
            continue
        try:
            submitted_answer = call_model(full_prompt)
            answers_payload.append(
                {"task_id": task_id, "submitted_answer": submitted_answer}
            )
            results_log.append(
                {
                    "Task ID": task_id,
                    "Question": question_text,
                    "Submitted Answer": submitted_answer,
                }
            )
        except Exception as e:
            results_log.append(
                {
                    "Task ID": task_id,
                    "Question": question_text,
                    "Submitted Answer": f"ERROR: {e}",
                }
            )
    return answers_payload, results_log


def execute_agent_operations(profile: gr.OAuthProfile | None):
    """Fetch questions, answer them with the model, and submit the results.

    Args:
        profile: the logged-in Hugging Face user (injected by Gradio's OAuth
            support), or None when not logged in.

    Returns:
        A ``(status_message, dataframe_or_None)`` tuple matching the two
        Gradio output components.
    """
    space_id = os.getenv("MY_SPACE_ID")
    if profile:
        username = f"{profile.username}"
        print(f"Usuario conectado: {username}")
    else:
        print("No has iniciado sesión.")
        return "Inicia sesión en Hugging Face.", None

    questions_url = f"{API_BASE_URL}/questions"
    attachments_url = f"{API_BASE_URL}/files/"
    submit_url = f"{API_BASE_URL}/submit"

    try:
        questions = _fetch_questions(questions_url, attachments_url)
        if not questions:
            return "No se encontraron preguntas.", None
    except Exception as e:
        return f"Error al obtener preguntas: {e}", None

    answers_payload, results_log = _run_agent(questions)
    if not answers_payload:
        return "El agente no produjo respuestas.", pd.DataFrame(results_log)

    submission_data = {
        "username": username.strip(),
        "agent_code": f"https://huggingface.co/spaces/{space_id}",
        "answers": answers_payload,
    }
    try:
        post_response = requests.post(submit_url, json=submission_data, timeout=60)
        post_response.raise_for_status()
        result_data = post_response.json()
        score = result_data.get("score", "N/A")
        return f"¡Envío exitoso!\nPuntuación: {score}", pd.DataFrame(results_log)
    except Exception as e:
        return f"Error al enviar: {e}", pd.DataFrame(results_log)
# Gradio UI: login button plus a single action that runs the agent and shows
# the submission status and the per-task answers table.
with gr.Blocks() as demo:
    gr.Markdown("# Evaluador de Agente (versión personalizada)")
    gr.LoginButton()
    run_button = gr.Button("Ejecutar Evaluación y Enviar Respuestas")
    status_output = gr.Textbox(label="Estado", lines=3)
    results_table = gr.DataFrame(label="Respuestas del agente")
    # No explicit `inputs`: Gradio injects the gr.OAuthProfile argument from
    # the session based on the function's type annotation.
    run_button.click(
        fn=execute_agent_operations,
        outputs=[status_output, results_table],
    )
# Script entry point: launch the Gradio server (debug=True enables verbose
# logging and error tracebacks in the UI).
if __name__ == "__main__":
    demo.launch(debug=True)