wuhp commited on
Commit
dbd6fa0
·
verified ·
1 Parent(s): 3290861

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +520 -189
app.py CHANGED
@@ -1,5 +1,3 @@
1
- # app.py
2
-
3
  import os
4
  import re
5
  import time
@@ -37,16 +35,19 @@ def list_private_models(
37
 
38
  def create_space_action(repo_name: str, sdk: str, profile: gr.OAuthProfile, token: gr.OAuthToken):
39
  repo_id = f"{profile.username}/{repo_name}"
40
- create_repo(
41
- repo_id=repo_id,
42
- token=token.token,
43
- exist_ok=True,
44
- repo_type="space",
45
- space_sdk=sdk
46
- )
47
- url = f"https://huggingface.co/spaces/{repo_id}"
48
- iframe = f'<iframe src="{url}" width="100%" height="500px"></iframe>'
49
- return repo_id, iframe
 
 
 
50
 
51
  def upload_file_to_space_action(
52
  file_obj,
@@ -54,9 +55,9 @@ def upload_file_to_space_action(
54
  repo_id: str,
55
  profile: gr.OAuthProfile,
56
  token: gr.OAuthToken
57
- ) -> str:
58
  if not (profile and token and repo_id):
59
- return "⚠️ Please log in and create a Space first."
60
  try:
61
  upload_file(
62
  path_or_fileobj=file_obj,
@@ -65,36 +66,42 @@ def upload_file_to_space_action(
65
  token=token.token,
66
  repo_type="space"
67
  )
68
- return f"✅ Uploaded `{path_in_repo}`"
69
  except Exception as e:
70
- return f"Error uploading `{path_in_repo}`: {e}"
71
 
72
  def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
 
 
73
  jwt_url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
74
- r = get_session().get(jwt_url, headers=build_hf_headers(token=token))
75
- hf_raise_for_status(r)
76
- jwt = r.json()["token"]
77
- logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
78
- lines, count = [], 0
79
- with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True, timeout=20) as resp:
80
- hf_raise_for_status(resp)
81
- for raw in resp.iter_lines():
82
- if count >= 200:
83
- lines.append("... truncated ...")
84
- break
85
- if not raw.startswith(b"data: "):
86
- continue
87
- payload = raw[len(b"data: "):]
88
- try:
89
- event = json.loads(payload.decode())
90
- ts = event.get("timestamp", "")
91
- txt = event.get("data", "").strip()
92
- if txt:
93
- lines.append(f"[{ts}] {txt}")
94
- count += 1
95
- except json.JSONDecodeError:
96
- continue
97
- return "\n".join(lines) if lines else f"No {level} logs found."
 
 
 
 
 
98
 
99
  def get_build_logs_action(repo_id, profile, token):
100
  if not (repo_id and profile and token):
@@ -104,6 +111,8 @@ def get_build_logs_action(repo_id, profile, token):
104
  def get_container_logs_action(repo_id, profile, token):
105
  if not (repo_id and profile and token):
106
  return "⚠️ Please log in and create a Space first."
 
 
107
  return _fetch_space_logs_level(repo_id, "run", token.token)
108
 
109
 
@@ -116,19 +125,52 @@ def configure_gemini(api_key: str | None, model_name: str | None) -> str:
116
  return "Please select a Gemini model."
117
  try:
118
  genai.configure(api_key=api_key)
119
- genai.GenerativeModel(model_name).generate_content("ping")
 
120
  return f"Gemini configured successfully with **{model_name}**."
121
  except Exception as e:
122
  return f"Error configuring Gemini: {e}"
123
 
124
  def call_gemini(prompt: str, api_key: str, model_name: str) -> str:
125
- genai.configure(api_key=api_key)
126
- model = genai.GenerativeModel(model_name)
127
- response = model.generate_content(prompt)
128
- return response.text or ""
129
-
130
-
131
- # --- AI workflow logic ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
132
 
133
  def ai_workflow_chat(
134
  message: str,
@@ -137,152 +179,391 @@ def ai_workflow_chat(
137
  hf_token: gr.OAuthToken | None,
138
  gemini_api_key: str | None,
139
  gemini_model: str | None,
140
- repo_id_state: str | None,
141
  workflow_state: str,
142
  space_sdk: str,
143
  preview_html: str,
144
  container_logs: str,
145
- build_logs: str
 
 
 
 
146
  ) -> tuple[
147
- list[list[str | None]],
148
- str | None,
149
- str,
150
- str,
151
- str,
152
- str
 
 
 
 
153
  ]:
154
- history.append([message, None])
155
- bot_message = ""
156
- repo_id = repo_id_state
157
- state = workflow_state
 
 
 
 
158
  updated_preview = preview_html
159
- updated_build = build_logs
160
- updated_run = container_logs
 
 
 
 
 
 
161
 
162
  try:
163
- # 1) Ensure login & API key
164
- if not hf_profile or not hf_token:
165
- bot_message = "Please log in to Hugging Face first."
166
- state = "awaiting_login"
167
- elif not gemini_api_key or not gemini_model:
168
- bot_message = "Please enter your API key and select a Gemini model."
169
- state = "awaiting_api_key"
170
-
171
- # 2) Auto-detect “generate me ... app called NAME”
172
- elif state == "idle" and re.search(r'generate (?:me )?(?:a|an) \w+ app called (\w+)', message, re.I):
173
- m = re.search(r'generate (?:me )?(?:a|an) \w+ app called (\w+)', message, re.I)
174
- repo_name = m.group(1)
175
- bot_message = f"Creating Space `{hf_profile.username}/{repo_name}`..."
176
- repo_id, iframe_html = create_space_action(repo_name, space_sdk, hf_profile, hf_token)
177
- updated_preview = iframe_html
178
- bot_message += "\n✅ Space created. Generating and uploading code..."
179
- state = "generating_all"
180
- app_desc = message
181
-
182
- # 3) Manual start: waiting for repo name
183
- elif (state in ("idle","awaiting_login","awaiting_api_key") or ("create" in message.lower())) and not repo_id:
184
- bot_message = "What should the Space be called? (e.g., `my-awesome-app`)"
185
- state = "awaiting_repo_name"
186
-
187
- elif state == "awaiting_repo_name":
188
- repo_name = message.strip()
189
- bot_message = f"Creating Space `{hf_profile.username}/{repo_name}`..."
190
- repo_id, iframe_html = create_space_action(repo_name, space_sdk, hf_profile, hf_token)
191
- updated_preview = iframe_html
192
- bot_message += "\n✅ Space created."
193
- state = "generating_all"
194
- app_desc = None
195
-
196
- # 4) Generate code + requirements + README + deploy
197
- if state == "generating_all":
198
- # generate app.py
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
  prompt = f"""
200
  You are an AI assistant specializing in Hugging Face Spaces using the {space_sdk} SDK.
201
  Generate a full, single-file Python app based on:
202
- '{app_desc or 'a Gradio image-blur test app with upload and slider controls'}'
203
- Return **only** the python code block for app.py.
204
  """
205
- code = call_gemini(prompt, gemini_api_key, gemini_model)
206
- code = code.strip().strip("```python").strip("```")
207
- # upload app.py
208
- upload_file_to_space_action(io.StringIO(code), "app.py", repo_id, hf_profile, hf_token)
209
- # generate requirements.txt
210
- reqs = "\n".join([
211
- "gradio",
212
- "google-generativeai",
213
- "huggingface_hub",
214
- "requests"
215
- ]) + "\n"
216
- upload_file_to_space_action(io.StringIO(reqs), "requirements.txt", repo_id, hf_profile, hf_token)
217
- # generate README.md
218
- readme = f"# {repo_id.split('/')[-1]}\n\n" \
219
- "This Hugging Face Space was generated by an AI.\n\n" \
220
- "## Usage\n\n" \
221
- "Upload an image and use the slider to control blur intensity.\n"
222
- upload_file_to_space_action(io.StringIO(readme), "README.md", repo_id, hf_profile, hf_token)
223
-
224
- bot_message += "\n✅ All files uploaded. Building and checking logs..."
225
- state = "checking_logs"
226
-
227
- # 5) Fetch logs and auto-debug loop
228
- if state == "checking_logs":
229
- updated_build = get_build_logs_action(repo_id, hf_profile, hf_token)
230
- updated_run = get_container_logs_action(repo_id, hf_profile, hf_token)
231
- # if errors detected, auto-debug up to 3 attempts
232
- attempts = 0
233
- while attempts < 3 and ( "Error" in updated_run or "Exception" in updated_run ):
234
- attempts += 1
235
- bot_message += f"\n🔧 Debug attempt #{attempts}"
236
- debug_prompt = f"""
237
- You are debugging a {space_sdk} Space.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
238
  Here are the container logs:
239
  {updated_run}
240
 
241
- Generate a fixed version of app.py only. Return the python code block.
 
242
  """
243
- fix_code = call_gemini(debug_prompt, gemini_api_key, gemini_model)
244
- fix_code = fix_code.strip().strip("```python").strip("```")
245
- upload_file_to_space_action(io.StringIO(fix_code), "app.py", repo_id, hf_profile, hf_token)
246
- time.sleep(5) # wait for rebuild
247
- updated_run = get_container_logs_action(repo_id, hf_profile, hf_token)
248
- if "Error" not in updated_run and "Exception" not in updated_run:
249
- bot_message += "\n✅ Application deployed successfully!"
250
- else:
251
- bot_message += "\n❌ Could not fully debug after 3 attempts. Please check logs."
252
 
253
- state = "idle"
 
254
 
255
- # 6) Reset workflow
256
- if "reset" in message.lower():
257
- bot_message = "Workflow reset."
258
- repo_id = None
259
- updated_preview = "<p>No Space created yet.</p>"
260
- updated_run = ""
261
- updated_build = ""
262
- state = "idle"
263
 
264
- # Catch-all
265
- if not bot_message:
266
- bot_message = "Command not recognized. Try 'generate me a gradio app called myapp', or 'reset'."
267
 
268
- except Exception as e:
269
- bot_message = f"Unexpected error: {e}"
270
- state = "idle"
271
 
272
- history[-1][1] = bot_message
273
- return history, repo_id, state, updated_preview, updated_run, updated_build
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
274
 
275
 
276
  # --- Build the Gradio UI ---
277
 
278
  with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
 
279
  hf_profile = gr.State(None)
280
  hf_token = gr.State(None)
281
  gemini_key = gr.State(None)
282
- gemini_model = gr.State("gemini-2.5-flash-preview-04-17")
283
- repo_id = gr.State(None)
284
- workflow = gr.State("idle")
285
- sdk_state = gr.State("gradio")
 
 
 
 
286
 
287
  with gr.Row():
288
  # Sidebar
@@ -290,13 +571,19 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
290
  gr.Markdown("## Hugging Face Login")
291
  login_status = gr.Markdown("*Not logged in.*")
292
  login_btn = gr.LoginButton(variant="huggingface")
 
 
293
  ai_builder_tab.load(show_profile, outputs=login_status)
 
294
  login_btn.click(show_profile, outputs=login_status)
 
295
  login_btn.click(lambda p, t: (p, t), outputs=[hf_profile, hf_token])
296
 
297
  gr.Markdown("## Google AI Studio API Key")
298
- gemini_input = gr.Textbox(label="API Key", type="password")
299
  gemini_status = gr.Markdown("")
 
 
300
  gemini_input.change(lambda k: k, inputs=gemini_input, outputs=gemini_key)
301
 
302
  gr.Markdown("## Gemini Model")
@@ -304,15 +591,18 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
304
  choices=[
305
  ("Gemini 2.5 Flash Preview 04-17", "gemini-2.5-flash-preview-04-17"),
306
  ("Gemini 2.5 Pro Preview 03-25", "gemini-2.5-pro-preview-03-25"),
307
- ("Gemini 2.0 Flash", "gemini-2.0-flash"),
308
  ("Gemini 2.0 Flash‑Lite", "gemini-2.0-flash-lite"),
309
  ("Gemini 1.5 Flash", "gemini-1.5-flash"),
310
  ],
311
  value="gemini-2.5-flash-preview-04-17",
312
- label="Select model"
 
313
  )
 
314
  model_selector.change(lambda m: m, inputs=model_selector, outputs=gemini_model)
315
 
 
316
  ai_builder_tab.load(
317
  configure_gemini,
318
  inputs=[gemini_key, gemini_model],
@@ -329,62 +619,103 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
329
  outputs=[gemini_status]
330
  )
331
 
 
332
  gr.Markdown("## Space SDK")
333
- sdk_selector = gr.Radio(choices=["gradio","streamlit"], value="gradio", label="Template SDK")
334
  sdk_selector.change(lambda s: s, inputs=sdk_selector, outputs=sdk_state)
335
 
 
 
 
 
336
  # Main content
337
  with gr.Column(scale=3):
338
  chatbot = gr.Chatbot()
339
- user_input = gr.Textbox(placeholder="Type your message…")
340
  send_btn = gr.Button("Send", interactive=False)
341
 
 
 
 
 
 
 
 
 
 
342
  ai_builder_tab.load(
343
- lambda p, k, m: gr.update(interactive=bool(p and k and m)),
344
- inputs=[hf_profile, gemini_key, gemini_model],
345
  outputs=[send_btn]
346
  )
347
  login_btn.click(
348
- lambda p, k, m: gr.update(interactive=bool(p and k and m)),
349
- inputs=[hf_profile, gemini_key, gemini_model],
350
  outputs=[send_btn]
351
  )
352
  gemini_input.change(
353
- lambda p, k, m: gr.update(interactive=bool(p and k and m)),
354
- inputs=[hf_profile, gemini_key, gemini_model],
355
  outputs=[send_btn]
356
  )
357
  model_selector.change(
358
- lambda p, k, m: gr.update(interactive=bool(p and k and m)),
359
- inputs=[hf_profile, gemini_key, gemini_model],
360
  outputs=[send_btn]
361
  )
362
 
363
- iframe = gr.HTML("<p>No Space created yet.</p>")
364
- build_txt = gr.Textbox(label="Build Logs", lines=10, interactive=False)
365
- run_txt = gr.Textbox(label="Container Logs", lines=10, interactive=False)
366
 
367
- def wrap_chat(msg, history, prof, tok, key, model, rid, wf, sdk, prev, run_l, build_l):
368
- new_hist, new_rid, new_wf, new_prev, new_run, new_build = ai_workflow_chat(
369
- msg, history, prof, tok, key, model, rid, wf, sdk, prev, run_l, build_l
370
- )
371
- return [(u or "", v or "") for u, v in new_hist], new_rid, new_wf, new_prev, new_run, new_build
372
 
 
373
  send_btn.click(
374
- wrap_chat,
375
  inputs=[
376
  user_input, chatbot,
377
  hf_profile, hf_token,
378
  gemini_key, gemini_model,
379
  repo_id, workflow, sdk_state,
380
- iframe, run_txt, build_txt
 
381
  ],
382
  outputs=[
383
  chatbot,
384
  repo_id, workflow,
385
- iframe, run_txt, build_txt
 
386
  ]
 
 
 
 
387
  )
388
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
389
  if __name__ == "__main__":
390
- ai_builder_tab.launch()
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import re
3
  import time
 
35
 
36
  def create_space_action(repo_name: str, sdk: str, profile: gr.OAuthProfile, token: gr.OAuthToken):
37
  repo_id = f"{profile.username}/{repo_name}"
38
+ try:
39
+ create_repo(
40
+ repo_id=repo_id,
41
+ token=token.token,
42
+ exist_ok=True,
43
+ repo_type="space",
44
+ space_sdk=sdk
45
+ )
46
+ url = f"https://huggingface.co/spaces/{repo_id}"
47
+ iframe = f'<iframe src="{url}" width="100%" height="500px"></iframe>'
48
+ return repo_id, iframe
49
+ except Exception as e:
50
+ raise RuntimeError(f"Failed to create Space {repo_id}: {e}") # Raise instead of returning string
51
 
52
  def upload_file_to_space_action(
53
  file_obj,
 
55
  repo_id: str,
56
  profile: gr.OAuthProfile,
57
  token: gr.OAuthToken
58
+ ) -> None: # Return None on success, raise on failure
59
  if not (profile and token and repo_id):
60
+ raise ValueError("Hugging Face profile, token, or repo_id is missing.")
61
  try:
62
  upload_file(
63
  path_or_fileobj=file_obj,
 
66
  token=token.token,
67
  repo_type="space"
68
  )
 
69
  except Exception as e:
70
+ raise RuntimeError(f"Failed to upload `{path_in_repo}` to {repo_id}: {e}") # Raise exception
71
 
72
  def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
73
+ if not repo_id or not token:
74
+ return f"Cannot fetch {level} logs: repo_id or token missing." # Handle missing state gracefully
75
  jwt_url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
76
+ try:
77
+ r = get_session().get(jwt_url, headers=build_hf_headers(token=token))
78
+ hf_raise_for_status(r)
79
+ jwt = r.json()["token"]
80
+ logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
81
+ lines, count = [], 0
82
+ with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True, timeout=20) as resp:
83
+ hf_raise_for_status(resp)
84
+ for raw in resp.iter_lines():
85
+ if count >= 200:
86
+ lines.append("... truncated ...")
87
+ break
88
+ if not raw.startswith(b"data: "):
89
+ continue
90
+ payload = raw[len(b"data: "):]
91
+ try:
92
+ event = json.loads(payload.decode())
93
+ ts = event.get("timestamp", "")
94
+ txt = event.get("data", "").strip()
95
+ if txt:
96
+ lines.append(f"[{ts}] {txt}")
97
+ count += 1
98
+ except json.JSONDecodeError:
99
+ continue
100
+ return "\n".join(lines) if lines else f"No {level} logs found."
101
+ except Exception as e:
102
+ # Don't raise here, just return error message in logs box
103
+ return f"Error fetching {level} logs: {e}"
104
+
105
 
106
  def get_build_logs_action(repo_id, profile, token):
107
  if not (repo_id and profile and token):
 
111
  def get_container_logs_action(repo_id, profile, token):
112
  if not (repo_id and profile and token):
113
  return "⚠️ Please log in and create a Space first."
114
+ # Add a short delay before fetching run logs, build might just finish
115
+ time.sleep(5)
116
  return _fetch_space_logs_level(repo_id, "run", token.token)
117
 
118
 
 
125
  return "Please select a Gemini model."
126
  try:
127
  genai.configure(api_key=api_key)
128
+ # Test a simple ping
129
+ genai.GenerativeModel(model_name).generate_content("ping", stream=False) # Use stream=False for sync ping
130
  return f"Gemini configured successfully with **{model_name}**."
131
  except Exception as e:
132
  return f"Error configuring Gemini: {e}"
133
 
134
  def call_gemini(prompt: str, api_key: str, model_name: str) -> str:
135
+ if not api_key or not model_name:
136
+ raise ValueError("Gemini API key or model not set.")
137
+ try:
138
+ genai.configure(api_key=api_key)
139
+ model = genai.GenerativeModel(model_name)
140
+ response = model.generate_content(prompt, stream=False) # Use stream=False for sync call
141
+ return response.text or ""
142
+ except Exception as e:
143
+ raise RuntimeError(f"Gemini API call failed: {e}") # Raise exception
144
+
145
+
146
+ # --- AI workflow logic (State Machine) ---
147
+
148
+ # Define States
149
+ STATE_IDLE = "idle"
150
+ STATE_AWAITING_REPO_NAME = "awaiting_repo_name"
151
+ STATE_CREATING_SPACE = "creating_space"
152
+ STATE_GENERATING_CODE = "generating_code"
153
+ STATE_UPLOADING_APP_PY = "uploading_app_py"
154
+ STATE_GENERATING_REQUIREMENTS = "generating_requirements"
155
+ STATE_UPLOADING_REQUIREMENTS = "uploading_requirements"
156
+ STATE_GENERATING_README = "generating_readme"
157
+ STATE_UPLOADING_README = "uploading_readme"
158
+ STATE_CHECKING_LOGS_BUILD = "checking_logs_build"
159
+ STATE_CHECKING_LOGS_RUN = "checking_logs_run"
160
+ STATE_DEBUGGING_CODE = "debugging_code"
161
+ STATE_UPLOADING_FIXED_APP_PY = "uploading_fixed_app_py"
162
+ STATE_COMPLETE = "complete"
163
+
164
+ MAX_DEBUG_ATTEMPTS = 3
165
+
166
+ def update_chat(history, bot_message):
167
+ """Helper to add a bot message and yield state."""
168
+ # Ensure last user message is in history
169
+ if history and history[-1][1] is None:
170
+ history[-1][1] = "" # Add empty bot response to last user message if none exists
171
+
172
+ history.append([None, bot_message])
173
+ return history
174
 
175
  def ai_workflow_chat(
176
  message: str,
 
179
  hf_token: gr.OAuthToken | None,
180
  gemini_api_key: str | None,
181
  gemini_model: str | None,
182
+ repo_id_state: str | None,
183
  workflow_state: str,
184
  space_sdk: str,
185
  preview_html: str,
186
  container_logs: str,
187
+ build_logs: str,
188
+ debug_attempts_state: int,
189
+ app_description_state: str | None, # Persist initial request
190
+ repo_name_state: str | None, # Persist chosen name
191
+ generated_code_state: str | None, # Persist generated code between steps
192
  ) -> tuple[
193
+ list[list[str | None]], # history
194
+ str | None, # repo_id
195
+ str, # workflow_state
196
+ str, # preview_html
197
+ str, # container_logs
198
+ str, # build_logs
199
+ int, # debug_attempts_state
200
+ str | None, # app_description_state
201
+ str | None, # repo_name_state
202
+ str | None, # generated_code_state
203
  ]:
204
+ # Unpack state variables
205
+ repo_id = repo_id_state
206
+ state = workflow_state
207
+ attempts = debug_attempts_state
208
+ app_desc = app_description_state
209
+ repo_name = repo_name_state
210
+ generated_code = generated_code_state
211
+
212
  updated_preview = preview_html
213
+ updated_build = build_logs
214
+ updated_run = container_logs
215
+
216
+ # Add user message to history for context
217
+ history.append([message, None])
218
+ # Yield immediately to show user message
219
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
220
+
221
 
222
  try:
223
+ # --- State Machine Logic ---
224
+
225
+ if state == STATE_IDLE:
226
+ # Check prerequisites first
227
+ if not (hf_profile and hf_token):
228
+ history = update_chat(history, "Please log in to Hugging Face first.")
229
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
230
+ return # Stop workflow until login
231
+ if not (gemini_api_key and gemini_model):
232
+ history = update_chat(history, "Please enter your API key and select a Gemini model.")
233
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
234
+ return # Stop workflow until API key/model set
235
+
236
+ # Look for commands
237
+ reset_match = "reset" in message.lower()
238
+ generate_match = re.search(r'generate (?:me )?(?:a|an) \w+ app called (\w+)', message, re.I)
239
+ create_match = re.search(r'create (?:a|an)? space called (\w+)', message, re.I)
240
+
241
+ if reset_match:
242
+ history = update_chat(history, "Workflow reset.")
243
+ # Reset all state variables
244
+ yield history, None, STATE_IDLE, "<p>No Space created yet.</p>", "", "", 0, None, None, None
245
+ return # End workflow
246
+
247
+ elif generate_match:
248
+ new_repo_name = generate_match.group(1)
249
+ new_app_desc = message # Store the full request
250
+ history = update_chat(history, f"Acknowledged: '{message}'. Starting workflow to create Space `{hf_profile.username}/{new_repo_name}`.")
251
+ # Transition to creating space state, passing name and description
252
+ yield history, repo_id, STATE_CREATING_SPACE, updated_preview, updated_run, updated_build, attempts, new_app_desc, new_repo_name, generated_code
253
+
254
+ elif create_match:
255
+ new_repo_name = create_match.group(1)
256
+ history = update_chat(history, f"Acknowledged: '{message}'. Starting workflow to create Space `{hf_profile.username}/{new_repo_name}`.")
257
+ # Transition to creating space state, just passing the name (desc will be default)
258
+ yield history, repo_id, STATE_CREATING_SPACE, updated_preview, updated_run, updated_build, attempts, app_desc, new_repo_name, generated_code # Use existing app_desc or None
259
+
260
+ elif "create" in message.lower() and not repo_id: # Generic create trigger
261
+ history = update_chat(history, "Okay, what should the Space be called? (e.g., `my-awesome-app`)")
262
+ # Transition to awaiting name state
263
+ yield history, repo_id, STATE_AWAITING_REPO_NAME, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Stay in this state
264
+
265
+ else:
266
+ # Handle other chat messages if needed, or just respond unknown
267
+ history = update_chat(history, "Command not recognized. Try 'generate me a gradio app called myapp', or 'reset'.")
268
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
269
+ # Stay in IDLE state
270
+
271
+ elif state == STATE_AWAITING_REPO_NAME:
272
+ new_repo_name = message.strip()
273
+ if not new_repo_name or re.search(r'\s', new_repo_name): # Basic validation for repo name
274
+ history = update_chat(history, "Invalid name. Please provide a single word/slug for the Space name.")
275
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Stay in this state
276
+ else:
277
+ history = update_chat(history, f"Using Space name `{new_repo_name}`. Creating Space `{hf_profile.username}/{new_repo_name}`...")
278
+ # Transition to creating space state, pass the received name
279
+ yield history, repo_id, STATE_CREATING_SPACE, updated_preview, updated_run, updated_build, attempts, app_desc, new_repo_name, generated_code
280
+
281
+ elif state == STATE_CREATING_SPACE:
282
+ # This state is triggered when we *already have* the repo_name in state
283
+ if not repo_name: # Safety check
284
+ history = update_chat(history, "Internal error: Repo name missing for creation. Resetting.")
285
+ yield history, None, STATE_IDLE, "<p>Error creating space.</p>", "", "", 0, None, None, None
286
+ return
287
+
288
+ try:
289
+ new_repo_id, iframe_html = create_space_action(repo_name, space_sdk, hf_profile, hf_token)
290
+ updated_preview = iframe_html
291
+ history = update_chat(history, f"✅ Space `{new_repo_id}` created.")
292
+ # Transition to generating code state, update repo_id and preview
293
+ yield history, new_repo_id, STATE_GENERATING_CODE, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
294
+
295
+ except Exception as e:
296
+ history = update_chat(history, f"❌ Error creating space: {e}")
297
+ # Reset state on failure
298
+ yield history, None, STATE_IDLE, "<p>Error creating space.</p>", "", "", 0, None, None, None
299
+
300
+ elif state == STATE_GENERATING_CODE:
301
+ # Use the stored app description or a default
302
+ prompt_desc = app_desc if app_desc else 'a Gradio image-blur test app with upload and slider controls'
303
  prompt = f"""
304
  You are an AI assistant specializing in Hugging Face Spaces using the {space_sdk} SDK.
305
  Generate a full, single-file Python app based on:
306
+ '{prompt_desc}'
307
+ Return **only** the python code block for app.py. Do not include any extra text, explanations, or markdown outside the code block.
308
  """
309
+ try:
310
+ history = update_chat(history, "🧠 Generating `app.py` code with Gemini...")
311
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Update UI to show "Generating..."
312
+
313
+ code = call_gemini(prompt, gemini_api_key, gemini_model)
314
+ # Clean markdown and whitespace
315
+ code = code.strip()
316
+ if code.startswith("```python"):
317
+ code = code[len("```python"):].strip()
318
+ if code.endswith("```"):
319
+ code = code[:-len("```")].strip()
320
+
321
+ if not code:
322
+ raise ValueError("Gemini returned empty code.")
323
+
324
+ history = update_chat(history, " `app.py` code generated. Click 'Send' to upload.")
325
+ # Transition to uploading state, store the generated code
326
+ yield history, repo_id, STATE_UPLOADING_APP_PY, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, code # Pass code in state
327
+
328
+ except Exception as e:
329
+ history = update_chat(history, f"❌ Error generating code: {e}. Click 'reset'.")
330
+ # Reset state on failure
331
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
332
+
333
+ elif state == STATE_UPLOADING_APP_PY:
334
+ # Use the generated_code stored in state
335
+ if not generated_code:
336
+ history = update_chat(history, "Internal error: No code to upload. Resetting.")
337
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
338
+ return
339
+
340
+ history = update_chat(history, "☁️ Uploading `app.py`...")
341
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, None # Yield to show message, clear generated_code state
342
+
343
+ try:
344
+ upload_file_to_space_action(io.StringIO(generated_code), "app.py", repo_id, hf_profile, hf_token)
345
+ history = update_chat(history, "✅ Uploaded `app.py`. Click 'Send' to generate requirements.")
346
+ # Transition to generating requirements
347
+ yield history, repo_id, STATE_GENERATING_REQUIREMENTS, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, None # Keep generated_code as None
348
+
349
+ except Exception as e:
350
+ history = update_chat(history, f"❌ Error uploading app.py: {e}. Click 'reset'.")
351
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
352
+
353
+ elif state == STATE_GENERATING_REQUIREMENTS:
354
+ history = update_chat(history, "📄 Generating `requirements.txt`...")
355
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield to show message
356
+
357
+ # Simple heuristic for requirements based on SDK and common needs
358
+ reqs_list = ["gradio"] if space_sdk == "gradio" else ["streamlit"]
359
+ # Add common deps if likely used (could parse code, but simpler heuristic for demo)
360
+ if "google.generativeai" in (generated_code or "") or gemini_api_key: # Check if Gemini was used for code or if key is set
361
+ reqs_list.append("google-generativeai")
362
+ if "requests" in (generated_code or ""):
363
+ reqs_list.append("requests")
364
+ reqs_list.append("huggingface_hub") # Needed for log fetching etc if done inside the space itself (though not in this current app's space code)
365
+
366
+ reqs_content = "\n".join(reqs_list) + "\n"
367
+
368
+ history = update_chat(history, "✅ `requirements.txt` generated. Click 'Send' to upload.")
369
+ # Transition to uploading requirements, store content temporarily
370
+ yield history, repo_id, STATE_UPLOADING_REQUIREMENTS, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, reqs_content # Pass content in state (abusing generated_code slot)
371
+
372
+
373
+ elif state == STATE_UPLOADING_REQUIREMENTS:
374
+ # Use content stored in state (abusing generated_code slot)
375
+ reqs_content_to_upload = generated_code # Get content from state
376
+ if not reqs_content_to_upload:
377
+ history = update_chat(history, "Internal error: No requirements content to upload. Resetting.")
378
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
379
+ return
380
+
381
+ history = update_chat(history, "☁️ Uploading `requirements.txt`...")
382
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, None # Yield, clear temp state
383
+
384
+ try:
385
+ upload_file_to_space_action(io.StringIO(reqs_content_to_upload), "requirements.txt", repo_id, hf_profile, hf_token)
386
+ history = update_chat(history, "✅ Uploaded `requirements.txt`. Click 'Send' to generate README.")
387
+ # Transition to generating README
388
+ yield history, repo_id, STATE_GENERATING_README, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, None
389
+
390
+ except Exception as e:
391
+ history = update_chat(history, f"❌ Error uploading requirements.txt: {e}. Click 'reset'.")
392
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
393
+
394
+ elif state == STATE_GENERATING_README:
395
+ history = update_chat(history, "📝 Generating `README.md`...")
396
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Yield to show message
397
+
398
+ # Generate a simple README based on app_desc or repo_name
399
+ readme_title = repo_name if repo_name else "My Awesome Space"
400
+ readme_description = app_desc if app_desc else "This Hugging Face Space hosts an AI-generated application."
401
+
402
+ readme_content = f"# {readme_title}\n\n{readme_description}\n\n" \
403
+ "This Space was automatically generated by an AI workflow.\n"
404
+
405
+ history = update_chat(history, "✅ `README.md` generated. Click 'Send' to upload.")
406
+ # Transition to uploading README, store content temporarily
407
+ yield history, repo_id, STATE_UPLOADING_README, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, readme_content # Pass content in state
408
+
409
+
410
+ elif state == STATE_UPLOADING_README:
411
+ # Use content stored in state (abusing generated_code slot)
412
+ readme_content_to_upload = generated_code # Get content from state
413
+ if not readme_content_to_upload:
414
+ history = update_chat(history, "Internal error: No README content to upload. Resetting.")
415
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
416
+ return
417
+
418
+ history = update_chat(history, "☁️ Uploading `README.md`...")
419
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, None # Yield, clear temp state
420
+
421
+ try:
422
+ upload_file_to_space_action(io.StringIO(readme_content_to_upload), "README.md", repo_id, hf_profile, hf_token)
423
+ history = update_chat(history, "✅ Uploaded `README.md`. Files uploaded. Space is now building. Click 'Send' to check build logs.")
424
+ # Transition to checking build logs
425
+ yield history, repo_id, STATE_CHECKING_LOGS_BUILD, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, None
426
+
427
+ except Exception as e:
428
+ history = update_chat(history, f"❌ Error uploading README.md: {e}. Click 'reset'.")
429
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
430
+
431
+
432
+ elif state == STATE_CHECKING_LOGS_BUILD:
433
+ # Optional: Add a short delay here if needed, but fetch action includes timeout
434
+ history = update_chat(history, "🔍 Fetching build logs...")
435
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Show message
436
+
437
+ build_logs_text = get_build_logs_action(repo_id, hf_profile, hf_token)
438
+ updated_build = build_logs_text
439
+
440
+ # Simple check: if build logs contain "Error" or "Exception", might indicate build issue.
441
+ # More robust would involve checking build status via API, but logs are simpler for demo.
442
+ # Assuming successful build leads to container logs check.
443
+ if "Error" in updated_build or "Exception" in updated_build:
444
+ history = update_chat(history, "⚠️ Build logs may contain errors. Please inspect. Click 'Send' to check container logs (app might still start).")
445
+ yield history, repo_id, STATE_CHECKING_LOGS_RUN, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Transition to run logs check
446
+
447
+ else:
448
+ history = update_chat(history, "✅ Build logs fetched. Click 'Send' to check container logs.")
449
+ yield history, repo_id, STATE_CHECKING_LOGS_RUN, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Transition to run logs check
450
+
451
+
452
+ elif state == STATE_CHECKING_LOGS_RUN:
453
+ history = update_chat(history, "🔍 Fetching container logs...")
454
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Show message
455
+
456
+ container_logs_text = get_container_logs_action(repo_id, hf_profile, hf_token)
457
+ updated_run = container_logs_text
458
+
459
+ # Check for errors/exceptions in run logs
460
+ if ("Error" in updated_run or "Exception" in updated_run) and attempts < MAX_DEBUG_ATTEMPTS:
461
+ attempts += 1
462
+ history = update_chat(history, f"❌ Errors detected in container logs. Attempting debug fix #{attempts}/{MAX_DEBUG_ATTEMPTS}. Click 'Send' to proceed.")
463
+ # Transition to debugging state, increment attempts
464
+ yield history, repo_id, STATE_DEBUGGING_CODE, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Keep other states same
465
+
466
+ elif ("Error" in updated_run or "Exception" in updated_run) and attempts >= MAX_DEBUG_ATTEMPTS:
467
+ history = update_chat(history, f"❌ Errors detected in container logs. Max debug attempts ({MAX_DEBUG_ATTEMPTS}) reached. Please inspect logs manually or click 'reset'.")
468
+ # Transition to complete/idle after failed attempts
469
+ yield history, repo_id, STATE_COMPLETE, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
470
+
471
+ else:
472
+ history = update_chat(history, "✅ App appears to be running successfully! Check the iframe above. Click 'reset' to start a new project.")
473
+ # Transition to complete/idle on success
474
+ yield history, repo_id, STATE_COMPLETE, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
475
+
476
+ elif state == STATE_DEBUGGING_CODE:
477
+ history = update_chat(history, f"🧠 Calling Gemini to generate fix based on logs...")
478
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code # Show message
479
+
480
+ debug_prompt = f"""
481
+ You are debugging a {space_sdk} Space. The goal is to fix the code in `app.py` based on the container logs provided.
482
+
483
  Here are the container logs:
484
  {updated_run}
485
 
486
+ Generate the *complete, fixed* content for `app.py` based on these logs.
487
+ Return **only** the python code block for app.py. Do not include any extra text, explanations, or markdown outside the code block.
488
  """
489
+ try:
490
+ fix_code = call_gemini(debug_prompt, gemini_api_key, gemini_model)
491
+ # Clean markdown and whitespace
492
+ fix_code = fix_code.strip()
493
+ if fix_code.startswith("```python"):
494
+ fix_code = fix_code[len("```python"):].strip()
495
+ if fix_code.endswith("```"):
496
+ fix_code = fix_code[:-len("```")].strip()
 
497
 
498
+ if not fix_code:
499
+ raise ValueError("Gemini returned empty fix code.")
500
 
 
 
 
 
 
 
 
 
501
 
502
+ history = update_chat(history, "✅ Fix code generated. Click 'Send' to upload.")
503
+ # Transition to uploading fixed code, pass the fixed code
504
+ yield history, repo_id, STATE_UPLOADING_FIXED_APP_PY, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, fix_code # Pass fix_code in state
505
 
 
 
 
506
 
507
+ except Exception as e:
508
+ history = update_chat(history, f"❌ Error generating debug code: {e}. Click 'reset'.")
509
+ # Reset on failure
510
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
511
+
512
+ elif state == STATE_UPLOADING_FIXED_APP_PY:
513
+ # Use the fixed code stored in state (abusing generated_code slot)
514
+ fixed_code_to_upload = generated_code # Get code from state
515
+ if not fixed_code_to_upload:
516
+ history = update_chat(history, "Internal error: No fixed code available to upload. Resetting.")
517
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
518
+ return
519
+
520
+ history = update_chat(history, "☁️ Uploading fixed `app.py`...")
521
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, None # Yield, clear temp state
522
+
523
+ try:
524
+ upload_file_to_space_action(io.StringIO(fixed_code_to_upload), "app.py", repo_id, hf_profile, hf_token)
525
+ history = update_chat(history, "✅ Fixed `app.py` uploaded. Space will rebuild. Click 'Send' to check logs again.")
526
+ # Transition back to checking run logs (after rebuild)
527
+ yield history, repo_id, STATE_CHECKING_LOGS_RUN, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, None # Keep generated_code as None
528
+
529
+ except Exception as e:
530
+ history = update_chat(history, f"❌ Error uploading fixed app.py: {e}. Click 'reset'.")
531
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
532
+
533
+ elif state == STATE_COMPLETE:
534
+ # App is successfully deployed or failed after attempts.
535
+ # User should click reset or start a new command.
536
+ # Just yield the current state.
537
+ yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
538
+
539
+ else: # Should not happen
540
+ history = update_chat(history, f"Internal error: Unknown state '{state}'. Resetting.")
541
+ yield history, None, STATE_IDLE, "<p>Unknown state error.</p>", "", "", 0, None, None, None
542
+
543
+
544
+ except Exception as e:
545
+ # Catch-all for unexpected exceptions in any state
546
+ history = update_chat(history, f"Workflow step failed unexpectedly: {e}. Click 'Send' to re-attempt this step or 'reset'.")
547
+ # Stay in the current state to allow re-attempt, or maybe go to idle?
548
+ # Let's go to idle on unexpected errors to prevent getting stuck.
549
+ yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
550
 
551
 
552
  # --- Build the Gradio UI ---
553
 
554
  with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
555
+ # State variables
556
  hf_profile = gr.State(None)
557
  hf_token = gr.State(None)
558
  gemini_key = gr.State(None)
559
+ gemini_model = gr.State("gemini-2.5-flash-preview-04-17") # Default model
560
+ repo_id = gr.State(None) # ID of the created Space (e.g., 'user/repo')
561
+ workflow = gr.State(STATE_IDLE) # Current state of the AI workflow
562
+ sdk_state = gr.State("gradio") # Selected SDK
563
+ debug_attempts = gr.State(0) # Counter for debug attempts
564
+ app_description = gr.State(None) # Stores the user's original request string
565
+ repo_name_state = gr.State(None) # Stores the parsed repo name
566
+ generated_code_state = gr.State(None) # Temporarily stores generated code or file content
567
 
568
  with gr.Row():
569
  # Sidebar
 
571
  gr.Markdown("## Hugging Face Login")
572
  login_status = gr.Markdown("*Not logged in.*")
573
  login_btn = gr.LoginButton(variant="huggingface")
574
+
575
+ # Initial load to check login status
576
  ai_builder_tab.load(show_profile, outputs=login_status)
577
+ # Update status on login click
578
  login_btn.click(show_profile, outputs=login_status)
579
+ # Store profile and token in state on login click
580
  login_btn.click(lambda p, t: (p, t), outputs=[hf_profile, hf_token])
581
 
582
  gr.Markdown("## Google AI Studio API Key")
583
+ gemini_input = gr.Textbox(label="API Key", type="password", interactive=True) # Ensure interactive
584
  gemini_status = gr.Markdown("")
585
+
586
+ # Update key in state
587
  gemini_input.change(lambda k: k, inputs=gemini_input, outputs=gemini_key)
588
 
589
  gr.Markdown("## Gemini Model")
 
591
  choices=[
592
  ("Gemini 2.5 Flash Preview 04-17", "gemini-2.5-flash-preview-04-17"),
593
  ("Gemini 2.5 Pro Preview 03-25", "gemini-2.5-pro-preview-03-25"),
594
+ ("Gemini 2.0 Flash", "gemini-2.0-flash"), # Add more recent models if available
595
  ("Gemini 2.0 Flash‑Lite", "gemini-2.0-flash-lite"),
596
  ("Gemini 1.5 Flash", "gemini-1.5-flash"),
597
  ],
598
  value="gemini-2.5-flash-preview-04-17",
599
+ label="Select model",
600
+ interactive=True # Ensure interactive
601
  )
602
+ # Update model in state
603
  model_selector.change(lambda m: m, inputs=model_selector, outputs=gemini_model)
604
 
605
+ # Configure Gemini status on load and when key/model changes
606
  ai_builder_tab.load(
607
  configure_gemini,
608
  inputs=[gemini_key, gemini_model],
 
619
  outputs=[gemini_status]
620
  )
621
 
622
+
623
  gr.Markdown("## Space SDK")
624
+ sdk_selector = gr.Radio(choices=["gradio","streamlit"], value="gradio", label="Template SDK", interactive=True)
625
  sdk_selector.change(lambda s: s, inputs=sdk_selector, outputs=sdk_state)
626
 
627
+ gr.Markdown("## Workflow Status")
628
+ status_text = gr.Textbox(label="Current State", value=STATE_IDLE, interactive=False)
629
+ repo_id_text = gr.Textbox(label="Current Space ID", value="None", interactive=False)
630
+
631
  # Main content
632
  with gr.Column(scale=3):
633
  chatbot = gr.Chatbot()
634
+ user_input = gr.Textbox(placeholder="Type your message…", interactive=True) # Ensure interactive
635
  send_btn = gr.Button("Send", interactive=False)
636
 
637
+ # Logic to enable send button only when logged in and API key is set
638
+ # This logic is handled reactively by the load and change events below
639
def update_send_button_state(profile, token, key, model):
    """Return a Gradio update enabling the Send button only when prerequisites are met.

    The button is interactive once the user is logged in to Hugging Face
    (both profile and token present) AND a non-empty Gemini API key and a
    model selection are available.

    Args:
        profile: OAuth profile object, or None when logged out.
        token: OAuth token object, or None when logged out.
        key: Google AI Studio API key string; may be None or "" (empty textbox).
        model: Selected Gemini model identifier, or None.

    Returns:
        gr.update(...) toggling the button's `interactive` flag.
    """
    is_logged_in = profile is not None and token is not None
    # Use truthiness rather than `is not None`: clearing the key textbox
    # produces an empty string, which must NOT count as a configured key.
    is_gemini_ready = bool(key) and bool(model)
    # Lightweight UI gate only — deliberately does not validate the key by
    # calling configure_gemini, to keep the UI responsive.
    return gr.update(interactive=is_logged_in and is_gemini_ready)
645
+
646
  ai_builder_tab.load(
647
+ update_send_button_state,
648
+ inputs=[hf_profile, hf_token, gemini_key, gemini_model],
649
  outputs=[send_btn]
650
  )
651
  login_btn.click(
652
+ update_send_button_state,
653
+ inputs=[hf_profile, hf_token, gemini_key, gemini_model],
654
  outputs=[send_btn]
655
  )
656
  gemini_input.change(
657
+ update_send_button_state,
658
+ inputs=[hf_profile, hf_token, gemini_key, gemini_model],
659
  outputs=[send_btn]
660
  )
661
  model_selector.change(
662
+ update_send_button_state,
663
+ inputs=[hf_profile, hf_token, gemini_key, gemini_model],
664
  outputs=[send_btn]
665
  )
666
 
 
 
 
667
 
668
+ iframe = gr.HTML("<p>No Space created yet.</p>")
669
+ build_txt = gr.Textbox(label="Build Logs", lines=10, interactive=False, value="") # Initialize empty
670
+ run_txt = gr.Textbox(label="Container Logs", lines=10, interactive=False, value="") # Initialize empty
 
 
671
 
672
+ # The main event handler for the Send button
673
  send_btn.click(
674
+ ai_workflow_chat,
675
  inputs=[
676
  user_input, chatbot,
677
  hf_profile, hf_token,
678
  gemini_key, gemini_model,
679
  repo_id, workflow, sdk_state,
680
+ iframe, run_txt, build_txt, # Pass current UI values
681
+ debug_attempts, app_description, repo_name_state, generated_code_state # Pass state variables
682
  ],
683
  outputs=[
684
  chatbot,
685
  repo_id, workflow,
686
+ iframe, run_txt, build_txt, # Update UI values
687
+ debug_attempts, app_description, repo_name_state, generated_code_state # Update state variables
688
  ]
689
+ ).success( # Clear input after successful send
690
+ lambda: gr.update(value=""),
691
+ inputs=None,
692
+ outputs=user_input
693
  )
694
 
695
+ # Link state variables to UI status displays
696
+ workflow.change(lambda s: s, inputs=workflow, outputs=status_text)
697
+ repo_id.change(lambda r: r if r else "None", inputs=repo_id, outputs=repo_id_text)
698
+ # Update logs and iframe when their state variables change (e.g., after fetch/create)
699
+ iframe.change(lambda h: h, inputs=iframe, outputs=iframe) # Not strictly needed, already linked via click outputs
700
+ build_txt.change(lambda t: t, inputs=build_txt, outputs=build_txt) # Not strictly needed
701
+ run_txt.change(lambda t: t, inputs=run_txt, outputs=run_txt) # Not strictly needed
702
+
703
+
704
+ # Add an initial message to the chatbot on load
705
def greet():
    """Seed the chatbot with a single assistant-only welcome turn.

    Returns chat history in Gradio's list-of-[user, assistant] pair format:
    one entry with no user message (None) and onboarding instructions.
    """
    welcome = (
        "Welcome! Please log in to Hugging Face and provide your Google AI "
        "Studio API key to start building Spaces. Once ready, type 'generate "
        "me a gradio app called myapp' or 'create' to begin."
    )
    return [[None, welcome]]
707
+
708
+ ai_builder_tab.load(greet, outputs=chatbot)
709
+
710
+
711
if __name__ == "__main__":
    # Entry point: launch the Gradio app defined above.
    # NOTE(review): if transient network errors surface when talking to the
    # Hub, mount a retry-enabled requests HTTPAdapter on huggingface_hub's
    # shared session (get_session()) here before launching.
    ai_builder_tab.launch()