wuhp committed · Commit 50f731d · verified · 1 parent: b874ac3

Update app.py

Files changed (1):
  1. app.py (+161 -76)
app.py CHANGED
@@ -1,9 +1,12 @@
  import re
  import json
  import time
+ import requests
  import importlib.metadata
  import gradio as gr
- from huggingface_hub import create_repo, upload_file, list_models, constants
+ from huggingface_hub import (
+     create_repo, upload_file, list_models, constants
+ )
  from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
  from google import genai
  from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
@@ -11,15 +14,13 @@ from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
  # — USER INFO & MODEL LISTING —

  def show_profile(profile: gr.OAuthProfile | None) -> str:
-     if profile is None:
-         return "*Not logged in.*"
-     return f"✅ Logged in as **{profile.username}**"
+     return f"✅ Logged in as **{profile.username}**" if profile else "*Not logged in.*"

  def list_private_models(
      profile: gr.OAuthProfile | None,
      oauth_token: gr.OAuthToken | None
  ) -> str:
-     if profile is None or oauth_token is None:
+     if not profile or not oauth_token:
          return "Please log in to see your models."
      models = [
          f"{m.id} ({'private' if m.private else 'public'})"
@@ -40,9 +41,20 @@ def extract_code(text: str) -> str:
      blocks = re.findall(r"```(?:\w*\n)?([\s\S]*?)```", text)
      return blocks[-1].strip() if blocks else text.strip()

+ def classify_errors(logs: str) -> str:
+     errs = set()
+     for line in logs.splitlines():
+         if "SyntaxError" in line:
+             errs.add("syntax")
+         elif "ImportError" in line or "ModuleNotFoundError" in line:
+             errs.add("import")
+         elif "Traceback" in line or "Exception" in line:
+             errs.add("runtime")
+     return ", ".join(errs) or "unknown"
+
  # — HF SPACE LOGGING —

- def _get_space_jwt(repo_id: str):
+ def _get_space_jwt(repo_id: str) -> str:
      url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
      r = get_session().get(url, headers=build_hf_headers())
      hf_raise_for_status(r)
@@ -50,21 +62,27 @@ def _get_space_jwt(repo_id: str):

  def fetch_logs(repo_id: str, level: str) -> str:
      jwt = _get_space_jwt(repo_id)
-     logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
+     url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
      lines = []
-     with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True) as resp:
+     with get_session().get(url, headers=build_hf_headers(token=jwt), stream=True) as resp:
          hf_raise_for_status(resp)
          for raw in resp.iter_lines():
              if raw.startswith(b"data: "):
                  try:
                      ev = json.loads(raw[len(b"data: "):].decode())
-                     ts = ev.get("timestamp","")
-                     txt = ev.get("data","")
+                     ts, txt = ev.get("timestamp",""), ev.get("data","")
                      lines.append(f"[{ts}] {txt}")
                  except:
                      continue
      return "\n".join(lines)

+ def check_iframe(url: str, timeout: int = 5) -> bool:
+     try:
+         r = requests.get(url, timeout=timeout)
+         return r.status_code == 200
+     except:
+         return False
+
  # — CORE LOOP —

  def handle_user_message(
@@ -72,18 +90,21 @@ def handle_user_message(
      sdk_choice: str,
      gemini_api_key: str,
      grounding_enabled: bool,
+     temperature: float,
+     max_output_tokens: int,
      profile: gr.OAuthProfile | None,
      oauth_token: gr.OAuthToken | None
  ):
-     if profile is None or oauth_token is None:
+     if not profile or not oauth_token:
          return history + [{"role":"assistant","content":"⚠️ Please log in first."}], "", "", "<p>No Space yet.</p>"

      client = genai.Client(api_key=gemini_api_key)
      system_msg = {
          "role":"system",
          "content":(
-             f"You are an AI assistant writing a HuggingFace Space using the "
-             f"{sdk_choice} SDK. After producing code, wait for logs; if errors appear, fix them."
+             f"You are an AI assistant that scaffolds a complete HuggingFace Space using the "
+             f"{sdk_choice} SDK. Generate working code, handle errors, and ensure the deployed "
+             "iframe loads successfully."
          )
      }
      chat = [system_msg] + history
@@ -92,120 +113,184 @@ def handle_user_message(
      readme_fn = "README.md"
      reqs_fn = "requirements.txt"
      repo_id = f"{profile.username}/{profile.username}-auto-space"
+     iframe_url = f"https://huggingface.co/spaces/{repo_id}"

      build_logs = run_logs = ""
-     for _ in range(5):
+     backoff = 1
+
+     for attempt in range(1, 7):
          tools = [Tool(google_search=GoogleSearch())] if grounding_enabled else []
-         cfg = GenerateContentConfig(tools=tools, response_modalities=["TEXT"])
+         cfg = GenerateContentConfig(
+             tools=tools,
+             response_modalities=["TEXT"],
+             temperature=temperature,
+             max_output_tokens=max_output_tokens,
+         )

+         # generate code
          resp = client.models.generate_content(
              model="gemini-2.5-flash-preview-04-17",
              contents=[m["content"] for m in chat],
              config=cfg
          )
-
-         raw = resp.text
-         code = extract_code(raw)
+         code = extract_code(resp.text)
          chat.append({"role":"assistant","content":code})

-         # write code
-         with open(code_fn, "w") as f:
-             f.write(code)
-
-         # write dynamic README
+         # local syntax check
+         try:
+             compile(code, code_fn, "exec")
+         except SyntaxError as e:
+             chat.append({
+                 "role":"user",
+                 "content": f"SyntaxError caught locally: {e}. Please fix the code."
+             })
+             time.sleep(backoff)
+             backoff = min(backoff * 2, 30)
+             continue
+
+         # write code, README, requirements
          sdk_version = get_sdk_version(sdk_choice)
-         readme = f"""---
+         files = {
+             code_fn: code,
+             readme_fn: f"""---
  title: Wuhp Auto Space
  emoji: 🐢
- colorFrom: red
- colorTo: pink
  sdk: {sdk_choice}
  sdk_version: {sdk_version}
  app_file: {code_fn}
  pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
- """
-         with open(readme_fn, "w") as f:
-             f.write(readme)
-
-         # write requirements
-         base_reqs = "pandas\n"
-         extra = "streamlit\n" if sdk_choice=="streamlit" else "gradio\n"
-         with open(reqs_fn, "w") as f:
-             f.write(base_reqs + extra)
+ See https://huggingface.co/docs/hub/spaces-config-reference
+ """,
+             reqs_fn: "pandas\n" + ("streamlit\n" if sdk_choice=="streamlit" else "gradio\n")
+         }
+         for fn, content in files.items():
+             with open(fn, "w") as f:
+                 f.write(content)

-         # push to HF
+         # push to HuggingFace
          create_repo(repo_id=repo_id, token=oauth_token.token,
                      exist_ok=True, repo_type="space", space_sdk=sdk_choice)
-         for fn in (code_fn, readme_fn, reqs_fn):
-             upload_file(path_or_fileobj=fn, path_in_repo=fn,
-                         repo_id=repo_id, token=oauth_token.token,
-                         repo_type="space")
+         for fn in files:
+             upload_file(fn, fn, repo_id=repo_id,
+                         token=oauth_token.token, repo_type="space")

+         # fetch logs
          build_logs = fetch_logs(repo_id, "build")
          run_logs = fetch_logs(repo_id, "run")
-         if "ERROR" not in build_logs.upper() and "ERROR" not in run_logs.upper():
+         errors = classify_errors(build_logs + "\n" + run_logs)
+
+         # success criteria
+         if "ERROR" not in build_logs.upper() and \
+            "ERROR" not in run_logs.upper() and \
+            check_iframe(iframe_url):
              break

+         # ask for a fix
          chat.append({
              "role":"user",
              "content":(
+                 f"Attempt {attempt} encountered {errors} errors.\n"
                  f"Build logs:\n{build_logs}\n\n"
                  f"Run logs:\n{run_logs}\n\n"
-                 "Please fix the code."
+                 f"Please fix the code and ensure the iframe at {iframe_url} returns HTTP 200."
              )
          })
-         time.sleep(2)
+         time.sleep(backoff)
+         backoff = min(backoff * 2, 30)
+
+     messages = [{"role":m["role"], "content":m["content"]} for m in chat if m["role"]!="system"]
+     iframe_html = (
+         f'<iframe src="{iframe_url}" width="100%" height="500px"></iframe>'
+         + ("" if check_iframe(iframe_url) else
+            "<p style='color:red;'>⚠️ iframe not responding.</p>")
+     )

-     messages = [{"role":m["role"],"content":m["content"]} for m in chat if m["role"]!="system"]
-     iframe = f'<iframe src="https://huggingface.co/spaces/{repo_id}" width="100%" height="500px"></iframe>'
-     return messages, build_logs, run_logs, iframe
+     return messages, build_logs, run_logs, iframe_html

- # — BUILD THE UI —
+ # — BUILD THE UI (Improved)

- with gr.Blocks(title="HF Space Auto‑Builder") as demo:
-     gr.Markdown("## Sign in + Auto‑Build Spaces\n\n1. Sign in 2. Enter your prompt 3. Watch code, README, requirements, logs, and preview\n\n---")
+ custom_css = """
+ .gradio-container { max-width: 1200px; margin: auto; }
+ .log-box { font-family: monospace; white-space: pre; }
+ """

-     # LOGIN & MODEL LISTING
-     login_btn = gr.LoginButton(variant="huggingface", size="lg")
-     status_md = gr.Markdown("*Not logged in.*")
-     models_md = gr.Markdown()
-     demo.load(show_profile, inputs=None, outputs=status_md)
+ with gr.Blocks(title="HF Space Auto‑Builder", css=custom_css) as demo:
+     with gr.Row():
+         # Sidebar
+         with gr.Column(scale=1, min_width=250):
+             login_btn = gr.LoginButton(variant="huggingface", size="sm")
+             status_md = gr.Markdown("*Not logged in.*")
+             models_md = gr.Markdown()
+             sdk_choice = gr.Radio(
+                 ["gradio", "streamlit"],
+                 value="gradio",
+                 label="SDK template"
+             )
+             api_key = gr.Textbox(label="Gemini API Key", type="password")
+             grounding = gr.Checkbox(label="Enable grounding", value=False)
+             temp = gr.Slider(0, 1, value=0.2, label="LLM temperature")
+             max_tokens = gr.Slider(256, 4096, value=1024, step=256, label="Max tokens")
+             clear_btn = gr.Button("Clear Chat", variant="secondary")
+
+         # Main area
+         with gr.Column(scale=3):
+             gr.Markdown("## 🐢 Wuhp Auto‑Builder\nSign in, enter your prompt, and watch your Space live.")
+             chatbot = gr.Chatbot(elem_id="chatbot")
+             user_in = gr.Textbox(
+                 placeholder="Type your prompt and hit Enter…",
+                 label="Prompt", lines=1
+             )
+             send_btn = gr.Button("Send", variant="primary", show_progress=True)
+
+             with gr.Tabs():
+                 with gr.TabItem("Chat"):
+                     pass
+                 with gr.TabItem("Logs"):
+                     with gr.Accordion("🔨 Build Logs", open=True):
+                         build_box = gr.Textbox(
+                             lines=8, interactive=False, elem_classes="log-box"
+                         )
+                     with gr.Accordion("🚀 Run Logs", open=False):
+                         run_box = gr.Textbox(
+                             lines=8, interactive=False, elem_classes="log-box"
+                         )
+                 with gr.TabItem("Preview"):
+                     preview = gr.HTML("<p>No Space yet.</p>")
+
+     # Wiring callbacks
+     demo.load(show_profile, inputs=None, outputs=status_md)
      demo.load(list_private_models, inputs=None, outputs=models_md)
-     login_btn.click(show_profile, inputs=None, outputs=status_md)
+     login_btn.click(show_profile, inputs=None, outputs=status_md)
      login_btn.click(list_private_models, inputs=None, outputs=models_md)

-     # CONTROLS
-     sdk_choice = gr.Radio(["gradio","streamlit"], value="gradio", label="SDK template")
-     api_key = gr.Textbox(label="Gemini API Key", type="password")
-     grounding = gr.Checkbox(label="Enable grounding", value=False)
-
-     # CHAT + OUTPUTS
-     chatbot = gr.Chatbot(type="messages")
-     user_in = gr.Textbox(placeholder="Your prompt…", label="Prompt")
-     send_btn = gr.Button("Send")
-
-     build_box = gr.Textbox(label="Build logs", lines=5, interactive=False)
-     run_box = gr.Textbox(label="Run logs", lines=5, interactive=False)
-     preview = gr.HTML("<p>No Space yet.</p>")
-
      send_btn.click(
          fn=handle_user_message,
-         inputs=[chatbot, sdk_choice, api_key, grounding],
+         inputs=[chatbot, sdk_choice, api_key, grounding, temp, max_tokens, status_md, models_md],
+         outputs=[chatbot, build_box, run_box, preview]
+     )
+     user_in.submit(
+         fn=handle_user_message,
+         inputs=[chatbot, sdk_choice, api_key, grounding, temp, max_tokens, status_md, models_md],
+         outputs=[chatbot, build_box, run_box, preview]
+     )
+
+     def _clear_chat():
+         return [], "", "", "<p>No Space yet.</p>"
+     clear_btn.click(
+         fn=_clear_chat,
+         inputs=None,
          outputs=[chatbot, build_box, run_box, preview]
      )

-     # Refresh Logs button —
-     def _refresh(profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None):
+     # Refresh Logs
+     refresh_btn = gr.Button("Refresh Logs", variant="secondary", size="sm")
+     def _refresh(profile, oauth_token):
          if not profile or not oauth_token:
              return "", ""
          repo = f"{profile.username}/{profile.username}-auto-space"
          return fetch_logs(repo, "build"), fetch_logs(repo, "run")
-
-     refresh_btn = gr.Button("Refresh Logs")
-     # Gradio will auto‑inject `profile` and `oauth_token` here.
      refresh_btn.click(_refresh, inputs=None, outputs=[build_box, run_box])

      demo.launch(server_name="0.0.0.0", server_port=7860)