Spaces:
Sleeping
Sleeping
File size: 5,958 Bytes
32de25b 6e9c777 32de25b 6e9c777 3dc01aa 6e9c777 3dc01aa 6e9c777 32de25b 6e9c777 5d26448 6e9c777 476039c 6e9c777 3dc01aa 476039c 6e9c777 476039c 3dc01aa 476039c 3dc01aa 476039c 6e9c777 3dc01aa 6e9c777 476039c 6e9c777 476039c 6e9c777 dbd6fa0 3dc01aa 6e9c777 dbd6fa0 a0f57d6 6e9c777 3dc01aa a0f57d6 934dfd0 6e9c777 851efc4 6e9c777 dbd6fa0 6e9c777 b8c1a3d 6e9c777 178c2d5 6e9c777 a0f57d6 6e9c777 851efc4 6e9c777 a0f57d6 6e9c777 476039c 6e9c777 a0f57d6 6e9c777 476039c 6e9c777 476039c d4653f4 476039c d4653f4 476039c 6e9c777 476039c 6e9c777 476039c 6e9c777 2528f91 6e9c777 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
import json
import time
from typing import Optional

import gradio as gr
from google import genai  # Gemini Python SDK
from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
from huggingface_hub import create_repo, upload_file, constants
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
# — HELPERS FOR HF SPACE LOGS —
def _get_space_jwt(repo_id: str):
    """Fetch a short-lived JWT that authorizes reading a Space's log streams.

    Parameters:
        repo_id: "owner/space-name" identifier of the Space.

    Returns:
        The JWT string issued by the Hub for this Space.
    """
    response = get_session().get(
        f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt",
        headers=build_hf_headers(),
    )
    hf_raise_for_status(response)
    return response.json()["token"]
def fetch_logs(repo_id: str, level: str):
    """Stream a Space's logs and return them as one newline-joined string.

    Parameters:
        repo_id: "owner/space-name" identifier of the Space.
        level: which log stream to read (e.g. "build" or "run").

    Returns:
        All received log events formatted as "[timestamp] message" lines.
    """
    jwt = _get_space_jwt(repo_id)
    logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
    lines = []
    with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True) as resp:
        hf_raise_for_status(resp)
        # The endpoint emits Server-Sent Events; payload lines start with "data: ".
        for raw in resp.iter_lines():
            if not raw.startswith(b"data: "):
                continue
            try:
                ev = json.loads(raw[len(b"data: "):].decode())
            except (json.JSONDecodeError, UnicodeDecodeError):
                # Skip malformed/partial events instead of aborting the stream.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
                continue
            ts = ev.get("timestamp", "")
            txt = ev.get("data", "")
            lines.append(f"[{ts}] {txt}")
    return "\n".join(lines)
# — CORE LOOP: send prompt & (iteratively) deploy —
def handle_user_message(
    history,
    hf_profile, hf_token,
    sdk_choice: str,
    gemini_api_key, grounding_enabled
):
    """Generate Space code with Gemini, deploy it, and iterate on log errors.

    Parameters:
        history: chat history from a gr.Chatbot(type="messages") component —
            a list of {"role": ..., "content": ...} dicts. Legacy
            (role, message) tuples are also accepted for safety.
        hf_profile: OAuth profile captured at login (has .username), or None.
        hf_token: OAuth token captured at login (has .token), or None.
        sdk_choice: "gradio" or "streamlit" — selects the Space SDK and filename.
        gemini_api_key: API key for the Gemini client.
        grounding_enabled: when True, attach the Google Search grounding tool.

    Returns:
        (new_history, build_logs, run_logs, iframe_html) — new_history is in
        messages format so the type="messages" Chatbot can render it.
    """
    # Ensure we actually have a profile & token before doing any work.
    if hf_profile is None or hf_token is None:
        # Bug fix: the chatbot uses type="messages", so entries must be
        # role/content dicts — the old code appended a (role, msg) tuple.
        warn = {"role": "assistant", "content": "⚠️ Please log in first."}
        return history + [warn], "", "", "<p>No Space yet.</p>"

    genai_client = genai.Client(api_key=gemini_api_key)

    chat = [{
        "role": "system",
        "content": (
            f"You are an AI assistant that writes a HuggingFace Space using the "
            f"{sdk_choice} SDK. After producing code, wait for logs. "
            "If errors appear, fix them and return the full updated code."
        )
    }]
    for entry in history:
        # Bug fix: messages-format history entries are dicts; unpacking a dict
        # as (role, msg) yields its *keys*, corrupting the conversation.
        if isinstance(entry, dict):
            chat.append({"role": entry["role"], "content": entry["content"]})
        else:
            role, msg = entry
            chat.append({"role": role, "content": msg})

    filename = "app.py" if sdk_choice == "gradio" else "streamlit_app.py"
    build_logs = run_logs = ""
    # Loop-invariant: the target repo never changes between attempts.
    repo_id = f"{hf_profile.username}/{hf_profile.username}-auto-space"

    for _ in range(5):
        # Build the tool list for grounding on each attempt.
        tools = []
        if grounding_enabled:
            tools.append(Tool(google_search=GoogleSearch()))
        config = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])

        resp = genai_client.models.generate_content(
            model="gemini-2.5-flash-preview-04-17",
            contents=[c["content"] for c in chat],
            config=config
        )
        ai_code = resp.text
        chat.append({"role": "assistant", "content": ai_code})

        # Write the generated file and deploy it to the Space.
        with open(filename, "w") as f:
            f.write(ai_code)
        create_repo(
            repo_id=repo_id,
            token=hf_token.token,
            exist_ok=True,
            repo_type="space",
            space_sdk=sdk_choice
        )
        upload_file(
            path_or_fileobj=filename,
            path_in_repo=filename,
            repo_id=repo_id,
            token=hf_token.token,
            repo_type="space"
        )

        build_logs = fetch_logs(repo_id, "build")
        run_logs = fetch_logs(repo_id, "run")
        if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
            break

        # Feed the failing logs back to the model and retry.
        chat.append({
            "role": "user",
            "content": (
                f"Build logs:\n{build_logs}\n\n"
                f"Run logs:\n{run_logs}\n\n"
                "Please fix the code."
            )
        })
        time.sleep(2)

    # Bug fix: return messages-format dicts (not tuples) for the
    # type="messages" Chatbot; the system prompt stays hidden.
    new_history = [m for m in chat if m["role"] != "system"]
    iframe = f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
    return new_history, build_logs, run_logs, iframe
# — GRADIO UI —
with gr.Blocks(title="HF Space Auto‑Builder (Gradio & Streamlit)") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Sidebar")
            # Hidden state holders for the OAuth profile and token.
            profile_state = gr.State()
            token_state = gr.State()
            login_btn = gr.LoginButton("huggingface", size="sm")
            login_status = gr.Markdown("*Not logged in.*")

            # Bug fix: Gradio injects OAuthProfile/OAuthToken only into
            # handlers whose parameters carry those type annotations.
            # The old lambdas declared two positional parameters with no
            # `inputs=`, so Gradio called them with zero arguments and the
            # click handlers raised a TypeError. Lambdas cannot be
            # annotated, so named functions are required here.
            def _capture_login(profile: Optional[gr.OAuthProfile],
                               oauth_token: Optional[gr.OAuthToken]):
                # Stash the OAuth objects in State for later event handlers.
                return profile, oauth_token

            def _login_status(profile: Optional[gr.OAuthProfile]):
                # Render the sidebar login-status line.
                if profile:
                    return f"Logged in as **{profile.username}**"
                return "*Not logged in.*"

            # Capture profile & token into state, then update status text.
            login_btn.click(_capture_login, outputs=[profile_state, token_state])
            login_btn.click(_login_status, outputs=login_status)

            sdk_choice = gr.Radio(
                ["gradio", "streamlit"], value="gradio", label="SDK Template"
            )
            api_key = gr.Textbox(label="Gemini API Key", type="password")
            grounding = gr.Checkbox(label="Enable grounding", value=False)

        with gr.Column(scale=3):
            chatbot = gr.Chatbot(type="messages")
            user_in = gr.Textbox(placeholder="Your prompt …", label="Prompt")
            send_btn = gr.Button("Send")
            build_box = gr.Textbox(label="Build logs", lines=5, interactive=False)
            run_box = gr.Textbox(label="Run logs", lines=5, interactive=False)
            preview = gr.HTML("<p>No Space yet.</p>")

    send_btn.click(
        fn=handle_user_message,
        inputs=[
            chatbot,
            profile_state, token_state,
            sdk_choice, api_key, grounding
        ],
        outputs=[chatbot, build_box, run_box, preview]
    )

if __name__ == "__main__":
    demo.launch()
|