# NOTE(review): the lines that were here were non-code residue from the web
# capture of this HF Space (Space status, file size, commit hashes, and the
# blob view's line-number gutter). They are replaced by this comment so the
# file parses as Python; no program content was lost.
import gradio as gr
import json, time
from pydantic import BaseModel
from huggingface_hub import create_repo, upload_file, list_models, constants
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
from google import genai
from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
# — JSON SPEC MODEL —
class RepoSpec(BaseModel):
    """Schema the LLM must return: a Space repo name plus a filename→content map."""

    repo_name: str         # bare repo name (no username prefix)
    files: dict[str, str]  # e.g. {"app.py": "...", "requirements.txt": "...", "README.md": "..."}
# — USER INFO & MODEL LISTING —
def show_profile(profile: gr.OAuthProfile | None) -> str:
    """Markdown status line: the username when logged in, a placeholder otherwise."""
    if not profile:
        return "*Not logged in.*"
    return f"✅ Logged in as **{profile.username}**"
def list_private_models(
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None
) -> str:
    """Return a markdown listing of the logged-in user's models.

    Both OAuth parameters are auto-injected by gradio from the session.
    Each entry is tagged private/public.
    """
    if not (profile and oauth_token):
        return "Please log in to see your models."
    models = [
        f"{m.id} ({'private' if m.private else 'public'})"
        for m in list_models(author=profile.username, token=oauth_token.token)
    ]
    if not models:
        return "No models found."
    # Fix: previously the first entry lacked the " - " bullet prefix because
    # the leading bullet was missing from the header string.
    return "Models:\n\n - " + "\n - ".join(models)
# — LOG FETCHING —
def _get_space_jwt(repo_id: str) -> str:
    """Fetch a short-lived JWT that authorizes log access for the given Space."""
    response = get_session().get(
        f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt",
        headers=build_hf_headers(),
    )
    hf_raise_for_status(response)
    return response.json()["token"]
def fetch_logs(repo_id: str, level: str) -> str:
    """Stream the SSE log feed of a Space and return it as one newline-joined string.

    level is the logs endpoint's path segment — "build" or "run" (see callers).
    Malformed SSE frames are skipped.
    """
    jwt = _get_space_jwt(repo_id)
    url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
    lines = []
    with get_session().get(url, headers=build_hf_headers(token=jwt), stream=True) as resp:
        hf_raise_for_status(resp)
        for raw in resp.iter_lines():
            # SSE frames carry their JSON payload after a b"data: " prefix.
            if not raw.startswith(b"data: "):
                continue
            try:
                ev = json.loads(raw[len(b"data: "):].decode())
            except (json.JSONDecodeError, UnicodeDecodeError):
                # Fix: was a bare `except:` that swallowed everything,
                # including KeyboardInterrupt/SystemExit. Only skip frames
                # that fail to decode or parse.
                continue
            lines.append(f"[{ev.get('timestamp','')}] {ev.get('data','')}")
    return "\n".join(lines)
# — CORE LOOP: ASK LLM FOR JSON, WRITE & DEPLOY —
def handle_user_message(
    history,                 # list of {"role","content"} dicts from the Chatbot
    user_prompt: str,
    sdk_choice: str,         # "gradio" or "streamlit"
    gemini_api_key: str,
    grounding_enabled: bool,
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None
):
    """Ask Gemini for a JSON repo spec, deploy it as an HF Space, and retry
    (up to 5 attempts) until the Space's build/run logs contain no "ERROR".

    Returns a 4-tuple: (chat messages without the system prompt, build logs,
    run logs, iframe HTML previewing the deployed Space).
    """
    if not (profile and oauth_token):
        return history + [{"role":"assistant","content":"⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>"

    client = genai.Client(api_key=gemini_api_key)
    system_msg = {
        "role": "system",
        "content": (
            "Return exactly one JSON object matching this schema:\n"
            " • repo_name (string)\n"
            " • files (object mapping filename→file-content)\n\n"
            "Files must include:\n"
            " - A code file (default name: app.py unless you choose otherwise)\n"
            " - requirements.txt with dependencies\n"
            " - README.md with frontmatter (title, emoji, sdk, sdk_version, app_file)\n\n"
            "Do NOT output any extra text—only the JSON object."
        )
    }
    chat = [system_msg] + history + [{"role": "user", "content": user_prompt}]

    # Fix: SDK-version detection is loop-invariant; it used to run on every
    # retry iteration. Detect it once up front.
    if sdk_choice == "gradio":
        import gradio as _gr
        sdk_version = _gr.__version__
    else:
        import streamlit as _st
        sdk_version = _st.__version__

    repo_id = None
    build_logs = run_logs = ""
    for _ in range(5):
        tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
        cfg = GenerateContentConfig(
            tools=tools,
            response_modalities=["TEXT"],
            response_mime_type="application/json",
            response_schema=RepoSpec  # constrain the model's output to the Pydantic schema
        )
        resp = client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=[m["content"] for m in chat],
            config=cfg
        )

        # Validate & parse the model's JSON answer.
        spec = RepoSpec.model_validate_json(resp.text)
        repo_id = f"{profile.username}/{spec.repo_name}"

        create_repo(
            repo_id=repo_id,
            token=oauth_token.token,
            exist_ok=True,          # re-deploying the same Space on retries is fine
            repo_type="space",
            space_sdk=sdk_choice
        )

        # Write each generated file locally, then upload it into the Space.
        for fn, content in spec.files.items():
            if fn.lower() == "readme.md":
                # The LLM emits a placeholder; substitute the real SDK version.
                content = content.replace("<SDK_VERSION>", sdk_version)
            with open(fn, "w") as f:
                f.write(content)
            upload_file(
                path_or_fileobj=fn,
                path_in_repo=fn,
                repo_id=repo_id,
                token=oauth_token.token,
                repo_type="space"
            )

        build_logs = fetch_logs(repo_id, "build")
        run_logs = fetch_logs(repo_id, "run")
        if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
            break

        # Feed the failing logs back so the model can repair its spec.
        chat.append({
            "role": "user",
            "content": (
                f"Build logs:\n{build_logs}\n\n"
                f"Run logs:\n{run_logs}\n\n"
                "Please fix the JSON spec and return updated JSON only."
            )
        })
        time.sleep(2)  # brief pause before the next attempt

    messages = [{"role": m["role"], "content": m["content"]} for m in chat if m["role"] != "system"]
    # Defensive: fall back to the placeholder if repo_id was somehow never set,
    # instead of rendering an iframe pointing at ".../spaces/None".
    iframe = (
        f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
        if repo_id else "<p>No Space yet.</p>"
    )
    return messages, build_logs, run_logs, iframe
# — BUILD THE UI —
# Component creation order defines layout; event wiring relies on gradio
# injecting OAuthProfile/OAuthToken from the handlers' signature annotations.
with gr.Blocks(title="HF Space Auto‑Builder") as demo:
    gr.Markdown(
        "## Sign in + Auto‑Build Spaces\n\n"
        "1. Sign in 2. Prompt 3. Deploy & Debug\n\n"
        "_LLM controls filenames, code, README, requirements, and loops until successful._\n\n---"
    )

    # Login + status; refreshed both on page load and after a login click.
    login_btn = gr.LoginButton(variant="huggingface", size="lg")
    profile_status = gr.Markdown("*Not logged in.*")
    model_list = gr.Markdown()
    demo.load(show_profile, None, profile_status)
    demo.load(list_private_models, None, model_list)
    login_btn.click(show_profile, None, profile_status)
    login_btn.click(list_private_models, None, model_list)

    # Build configuration inputs.
    sdk_choice = gr.Radio(["gradio", "streamlit"], "gradio", label="SDK")
    api_key = gr.Textbox(label="Gemini API Key", type="password")
    grounding = gr.Checkbox(label="Enable grounding")

    # Chat + deployment feedback.
    chatbot = gr.Chatbot(type="messages")
    prompt_box = gr.Textbox(label="Prompt", placeholder="e.g. Build a CSV inspector…")
    submit_btn = gr.Button("Send")
    build_logs_box = gr.Textbox(label="Build logs", lines=5)
    run_logs_box = gr.Textbox(label="Run logs", lines=5)
    space_preview = gr.HTML("<p>No Space yet.</p>")

    submit_btn.click(
        fn=handle_user_message,
        inputs=[chatbot, prompt_box, sdk_choice, api_key, grounding],
        outputs=[chatbot, build_logs_box, run_logs_box, space_preview],
    )

if __name__ == "__main__":
    demo.launch()