import re
import json
import time
import requests
import importlib.metadata
import gradio as gr
from huggingface_hub import (
    create_repo, upload_file, list_models, constants
)
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
from google import genai
from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
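
# NOTE: besides the standard library, this script assumes `gradio`,
# `huggingface_hub`, `requests`, and the `google-genai` SDK are installed,
# e.g. `pip install gradio huggingface_hub requests google-genai`.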

# — USER INFO & MODEL LISTING —
def show_profile(profile: gr.OAuthProfile | None) -> str:
    return f"✅ Logged in as **{profile.username}**" if profile else "*Not logged in.*"

def list_private_models(
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None
) -> str:
    if not profile or not oauth_token:
        return "Please log in to see your models."
    models = [
        f"{m.id} ({'private' if m.private else 'public'})"
        for m in list_models(author=profile.username, token=oauth_token.token)
    ]
    return "No models found." if not models else "Models:\n\n- " + "\n- ".join(models)

# — UTILITIES —
def get_sdk_version(sdk_choice: str) -> str:
    pkg = "gradio" if sdk_choice == "gradio" else "streamlit"
    try:
        return importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        return "UNKNOWN"

def extract_code(text: str) -> str:
    blocks = re.findall(r"```(?:\w*\n)?([\s\S]*?)```", text)
    return blocks[-1].strip() if blocks else text.strip()
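# Illustrative behaviour (not executed): the last fenced block wins, and
# fenceless replies pass through untouched.
#   extract_code("intro\n```python\nprint('hi')\n```")  ->  "print('hi')"
#   extract_code("no fences here")                      ->  "no fences here"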

def classify_errors(logs: str) -> str:
    errs = set()
    for line in logs.splitlines():
        if "SyntaxError" in line:
            errs.add("syntax")
        elif "ImportError" in line or "ModuleNotFoundError" in line:
            errs.add("import")
        elif "Traceback" in line or "Exception" in line:
            errs.add("runtime")
    return ", ".join(errs) or "unknown"

# — HF SPACE LOGGING —
def _get_space_jwt(repo_id: str) -> str:
    url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
    r = get_session().get(url, headers=build_hf_headers())
    hf_raise_for_status(r)
    return r.json()["token"]
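
# fetch_logs() below streams the Space's log endpoint, which serves
# server-sent events. Each useful line looks roughly like this
# (illustrative payload):
#   data: {"timestamp": "2024-01-01T00:00:00Z", "data": "Build started"}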
def fetch_logs(repo_id: str, level: str) -> str:
    jwt = _get_space_jwt(repo_id)
    url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
    lines = []
    with get_session().get(url, headers=build_hf_headers(token=jwt), stream=True) as resp:
        hf_raise_for_status(resp)
        for raw in resp.iter_lines():
            if raw.startswith(b"data: "):
                try:
                    ev = json.loads(raw[len(b"data: "):].decode())
                    ts, txt = ev.get("timestamp", ""), ev.get("data", "")
                    lines.append(f"[{ts}] {txt}")
                except (json.JSONDecodeError, UnicodeDecodeError):
                    continue
    return "\n".join(lines)

def check_iframe(url: str, timeout: int = 5) -> bool:
    try:
        return requests.get(url, timeout=timeout).status_code == 200
    except requests.RequestException:
        return False

# — CORE LOOP WITH TWO AGENTS —
def handle_user_message(
    history,
    sdk_choice: str,
    gemini_api_key: str,
    grounding_enabled: bool,
    temperature: float,
    max_output_tokens: int,
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None
):
    if not profile or not oauth_token:
        return history + [{"role": "assistant", "content": "⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>"
    client = genai.Client(api_key=gemini_api_key)
    code_fn = "app.py" if sdk_choice == "gradio" else "streamlit_app.py"
    repo_id = f"{profile.username}/{profile.username}-auto-space"
    iframe_url = f"https://huggingface.co/spaces/{repo_id}"
    # — SYSTEM PROMPTS —
    system_code = {
        "role": "system",
        "content": (
            "You are **Code‑Gen Agent**, a proactive AI developer. Your sole responsibility is to author "
            f"and correct the entire `{code_fn}` file in a single markdown code block—no extra commentary. "
            "You have permission to edit files, push updates to the HF Space, and optimize code. "
            "After each push, await build & run logs before making further changes."
        )
    }
    system_debug = {
        "role": "system",
        "content": (
            "You are **Debug Agent**, a meticulous code reviewer. You can read all files, logs, and the app "
            "preview, but you **cannot** modify or push code. Your task is to analyze the latest code + logs "
            "and return concise, actionable feedback or “All clear.”"
        )
    }
    # initialize each agent’s conversation
    code_chat = [system_code] + history[:]
    debug_chat = [system_debug] + history[:]
    build_logs = run_logs = ""
    backoff = 1
    for attempt in range(1, 7):
        tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
        cfg = GenerateContentConfig(
            tools=tools,
            response_modalities=["TEXT"],
            temperature=temperature,
            max_output_tokens=max_output_tokens,
        )
        # --- 1) Code‑Gen generates or updates code ---
        resp_code = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=[m["content"] for m in code_chat],
            config=cfg
        )
        code = extract_code(resp_code.text)
        code_chat.append({"role": "assistant", "content": code})
        debug_chat.append({"role": "assistant", "content": code})
        # quick syntax check
        try:
            compile(code, code_fn, "exec")
        except SyntaxError as e:
            code_chat.append({
                "role": "user",
                "content": f"SyntaxError caught: {e}. Please correct `{code_fn}` only."
            })
            time.sleep(backoff)
            backoff = min(backoff * 2, 30)
            continue
# write & push to HF Space | |
sdk_version = get_sdk_version(sdk_choice) | |
files = { | |
code_fn: code, | |
"README.md": f"""--- | |
title: Wuhp Auto Space | |
emoji: 🐢 | |
sdk: {sdk_choice} | |
sdk_version: {sdk_version} | |
app_file: {code_fn} | |
pinned: false | |
--- | |
""", | |
"requirements.txt": "pandas\n" + ("streamlit\n" if sdk_choice=="streamlit" else "gradio\n") | |
} | |
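        # Spaces reads the README's YAML front matter (sdk, sdk_version,
        # app_file) to decide how to build and launch the app.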
        for fn, content in files.items():
            with open(fn, "w") as f:
                f.write(content)
        create_repo(repo_id=repo_id, token=oauth_token.token,
                    exist_ok=True, repo_type="space", space_sdk=sdk_choice)
        for fn in files:
            upload_file(
                path_or_fileobj=fn, path_in_repo=fn,
                repo_id=repo_id, token=oauth_token.token,
                repo_type="space"
            )
        # fetch logs
        build_logs = fetch_logs(repo_id, "build")
        run_logs = fetch_logs(repo_id, "run")
        err_types = classify_errors(build_logs + "\n" + run_logs)
        # --- 2) Debug‑Agent reviews code & logs ---
        debug_input = (
            f"🏷 **Attempt {attempt}**\n"
            f"Error types: {err_types}\n\n"
            f"**Build logs:**\n{build_logs}\n\n"
            f"**Run logs:**\n{run_logs}\n\n"
            "If there are no errors, reply “All clear.” Otherwise, list your recommended changes."
        )
        debug_chat.append({"role": "user", "content": debug_input})
        resp_debug = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=[m["content"] for m in debug_chat],
            config=cfg
        )
        feedback = resp_debug.text.strip()
        debug_chat.append({"role": "assistant", "content": feedback})
        # check for success
        if ("ERROR" not in build_logs.upper()
                and "ERROR" not in run_logs.upper()
                and check_iframe(iframe_url)):
            break
        # feed debug feedback back to Code‑Gen
        code_chat.append({
            "role": "user",
            "content": f"🔧 Debug feedback:\n{feedback}\nPlease output the full corrected `{code_fn}` code block only."
        })
        time.sleep(backoff)
        backoff = min(backoff * 2, 30)
    # prepare UI outputs
    messages = [
        {"role": m["role"], "content": m["content"]}
        for m in code_chat if m["role"] != "system"
    ]
    iframe_html = (
        f'<iframe src="{iframe_url}" width="100%" height="500px"></iframe>'
        + ("" if check_iframe(iframe_url)
           else "<p style='color:red;'>⚠️ iframe not responding.</p>")
    )
    return messages, build_logs, run_logs, iframe_html

# — SIMPLE UI WITH HIGHER MAX TOKENS —
with gr.Blocks(title="HF Space Auto‑Builder") as demo:
    gr.Markdown("## 🐢 HF Space Auto‑Builder\n1) Sign in  2) Enter prompt  3) Watch code, logs & preview")
    login_btn = gr.LoginButton(variant="huggingface", size="lg")
    status_md = gr.Markdown("*Not logged in.*")
    models_md = gr.Markdown()
    demo.load(show_profile, inputs=None, outputs=status_md)
    demo.load(list_private_models, inputs=None, outputs=models_md)
    login_btn.click(show_profile, inputs=None, outputs=status_md)
    login_btn.click(list_private_models, inputs=None, outputs=models_md)
    sdk_choice = gr.Radio(["gradio", "streamlit"], value="gradio", label="SDK")
    api_key = gr.Textbox(label="Gemini API Key", type="password")
    grounding = gr.Checkbox(label="Enable grounding", value=False)
    temp = gr.Slider(0, 1, value=0.2, label="Temperature")
    max_tokens = gr.Number(value=2048, label="Max output tokens (set up to Gemini’s limit)")
    chatbot = gr.Chatbot(type="messages")
    user_in = gr.Textbox(placeholder="Your prompt…", label="Prompt", lines=1)
    send_btn = gr.Button("Send")
    build_box = gr.Textbox(label="Build logs", lines=5, interactive=False)
    run_box = gr.Textbox(label="Run logs", lines=5, interactive=False)
    preview = gr.HTML("<p>No Space yet.</p>")
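    # NOTE: `profile` and `oauth_token` are deliberately absent from `inputs`
    # below; Gradio injects them from the gr.OAuthProfile / gr.OAuthToken
    # type annotations on handle_user_message.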
    send_btn.click(
        fn=handle_user_message,
        inputs=[chatbot, sdk_choice, api_key, grounding, temp, max_tokens],
        outputs=[chatbot, build_box, run_box, preview]
    )
    user_in.submit(
        fn=handle_user_message,
        inputs=[chatbot, sdk_choice, api_key, grounding, temp, max_tokens],
        outputs=[chatbot, build_box, run_box, preview]
    )
    refresh_btn = gr.Button("Refresh Logs")

    def refresh_logs(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None):
        # The OAuth type annotations are required here: a bare lambda would
        # receive no arguments, since no `inputs` are wired to this event.
        if not profile:
            return "Please log in.", ""
        repo_id = f"{profile.username}/{profile.username}-auto-space"
        return fetch_logs(repo_id, "build"), fetch_logs(repo_id, "run")

    refresh_btn.click(fn=refresh_logs, outputs=[build_box, run_box])

demo.launch(server_name="0.0.0.0", server_port=7860)