Update app.py
app.py
CHANGED
@@ -7,6 +7,7 @@ import requests

import gradio as gr
import google.generativeai as genai

from huggingface_hub import create_repo, list_models, upload_file, constants
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
@@ -53,7 +54,7 @@ def create_space_action(repo_name: str, sdk: str, profile: gr.OAuthProfile, toke
iframe = f'<iframe src="{url}" width="100%" height="500px"></iframe>'
return repo_id, iframe
except Exception as e:
- raise RuntimeError(f"Failed to create Space {repo_id}

def upload_file_to_space_action(
file_obj: io.StringIO, # Specify type hint for clarity
@@ -74,7 +75,7 @@ def upload_file_to_space_action(
repo_type="space"
)
except Exception as e:
- raise RuntimeError(f"Failed to upload `{path_in_repo}` to {repo_id}

def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
"""Fetches build or run logs for a Space."""
@@ -82,9 +83,9 @@ def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
return f"Cannot fetch {level} logs: repo_id or token missing."
jwt_url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
try:
- r
hf_raise_for_status(r) # Raise HTTPError for bad responses (4xx or 5xx)
- jwt
logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
lines, count = [], 0
# Using stream=True is good for potentially large logs
@@ -99,8 +100,8 @@ def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
payload = raw[len(b"data: "):]
try:
event = json.loads(payload.decode())
- ts
- txt
if txt:
lines.append(f"[{ts}] {txt}")
count += 1
@@ -110,7 +111,7 @@ def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
return "\n".join(lines) if lines else f"No {level} logs found."
except Exception as e:
# Catching generic exception is acceptable for helper functions
- return f"Error fetching {level} logs


def get_build_logs_action(repo_id, profile, token):
@@ -130,33 +131,52 @@ def get_container_logs_action(repo_id, profile, token):
return _fetch_space_logs_level(repo_id, "run", token.token)


- # --- Google Gemini integration with model selection ---

def configure_gemini(api_key: str | None, model_name: str | None) -> str:
"""Configures the Gemini API and checks if the model is accessible."""
if not api_key:
- return "Gemini API key is not set."
if not model_name:
- return "Please select a Gemini model."
try:
genai.configure(api_key=api_key)
# Attempt a simple call to verify credentials and model availability
# This will raise an exception if the key is invalid or model not found
genai.GenerativeModel(model_name).generate_content("ping", stream=False)
- return f"Gemini configured successfully with **{model_name}**."
except Exception as e:
- return f"Error configuring Gemini: {e}"

- def call_gemini(prompt: str, api_key: str, model_name: str) -> str:
- """Calls the Gemini API with a given prompt."""
if not api_key or not model_name:
raise ValueError("Gemini API key or model not set.")
try:
genai.configure(api_key=api_key)
model = genai.GenerativeModel(model_name)
# Using generate_content and stream=False for simplicity here
- response = model.generate_content(
return response.text or "" # Return empty string if no text
except Exception as e:
# Re-raising as RuntimeError for the workflow to catch and manage
raise RuntimeError(f"Gemini API call failed: {e}")
@@ -205,6 +225,7 @@ def ai_workflow_chat(
|
|
205 |
app_description_state: str | None,
|
206 |
repo_name_state: str | None,
|
207 |
generated_code_state: str | None,
|
|
|
208 |
# Absorb potential extra args passed by Gradio event listeners (e.g. old value, event data)
|
209 |
*args,
|
210 |
**kwargs
|
@@ -219,6 +240,7 @@ def ai_workflow_chat(
|
|
219 |
str | None, # 7: Updated app description
|
220 |
str | None, # 8: Updated repo name
|
221 |
str | None, # 9: Updated generated code (for temporary storage)
|
|
|
222 |
]:
|
223 |
"""
|
224 |
Generator function to handle the AI workflow state machine.
|
@@ -231,6 +253,7 @@ def ai_workflow_chat(
|
|
231 |
app_desc = app_description_state
|
232 |
repo_name = repo_name_state
|
233 |
generated_code = generated_code_state
|
|
|
234 |
|
235 |
# Keep copies of potentially updated UI elements passed as inputs to update them later
|
236 |
updated_preview = preview_html
|
@@ -246,8 +269,9 @@ def ai_workflow_chat(
|
|
246 |
|
247 |
# Yield immediately to update the chat UI with the user's message
|
248 |
# This provides immediate feedback to the user while the AI processes
|
|
|
249 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
250 |
-
attempts, app_desc, repo_name, generated_code)
|
251 |
|
252 |
try:
|
253 |
# --- State Machine Logic based on the current 'state' variable ---
|
@@ -258,14 +282,14 @@ def ai_workflow_chat(
|
|
258 |
history = add_bot_message(history, "Workflow paused: Please log in to Hugging Face first.")
|
259 |
# Yield updated history and current state, then exit for this click
|
260 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
261 |
-
attempts, app_desc, repo_name, generated_code)
|
262 |
return # Exit the generator for this click
|
263 |
|
264 |
if not (gemini_api_key and gemini_model):
|
265 |
history = add_bot_message(history, "Workflow paused: Please enter your API key and select a Gemini model.")
|
266 |
# Yield updated history and current state, then exit for this click
|
267 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
268 |
-
attempts, app_desc, repo_name, generated_code)
|
269 |
return # Exit the generator for this click
|
270 |
|
271 |
# Look for specific commands in the user's message
|
@@ -280,7 +304,7 @@ def ai_workflow_chat(
|
|
280 |
history = add_bot_message(history, "Workflow reset.")
|
281 |
# Yield updated history and reset state variables to their initial values
|
282 |
yield (history, None, STATE_IDLE, "<p>No Space created yet.</p>", "", "", 0,
|
283 |
-
None, None, None)
|
284 |
# No return needed after yield in this generator pattern; execution for this click ends here.
|
285 |
|
286 |
elif generate_match:
|
@@ -294,7 +318,7 @@ def ai_workflow_chat(
|
|
294 |
app_desc = new_app_desc
|
295 |
# Yield updated history and state variables
|
296 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
297 |
-
attempts, app_desc, repo_name, generated_code)
|
298 |
# No return needed
|
299 |
|
300 |
elif create_match:
|
@@ -306,7 +330,7 @@ def ai_workflow_chat(
|
|
306 |
repo_name = new_repo_name
|
307 |
# Yield updated history and state variables
|
308 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
309 |
-
attempts, app_desc, repo_name, generated_code)
|
310 |
# No return needed
|
311 |
|
312 |
elif "create" in message.lower() and not repo_id:
|
@@ -315,7 +339,7 @@ def ai_workflow_chat(
|
|
315 |
state = STATE_AWAITING_REPO_NAME # Transition to the state where we wait for the name
|
316 |
# Yield updated history and state
|
317 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
318 |
-
attempts, app_desc, repo_name, generated_code)
|
319 |
# No return needed
|
320 |
|
321 |
else:
|
@@ -323,7 +347,7 @@ def ai_workflow_chat(
|
|
323 |
history = add_bot_message(history, "Command not recognized. Try 'generate me a gradio app called myapp', or 'reset'.")
|
324 |
# Yield updated history and current state
|
325 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
326 |
-
attempts, app_desc, repo_name, generated_code)
|
327 |
# No return needed
|
328 |
|
329 |
|
@@ -331,11 +355,12 @@ def ai_workflow_chat(
|
|
331 |
# User's message is expected to be the repo name
|
332 |
new_repo_name = message.strip()
|
333 |
# Basic validation for Hugging Face repo name format
|
334 |
-
|
335 |
-
|
|
|
336 |
# Stay in AWAITING_REPO_NAME state and yield message
|
337 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
338 |
-
attempts, app_desc, repo_name, generated_code)
|
339 |
# No return needed
|
340 |
|
341 |
else:
|
@@ -345,7 +370,7 @@ def ai_workflow_chat(
|
|
345 |
# Yield updated history, state, and repo name.
|
346 |
# The next click will proceed from the STATE_CREATING_SPACE block.
|
347 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
348 |
-
attempts, app_desc, repo_name, generated_code)
|
349 |
# No return needed
|
350 |
|
351 |
# Note: Each 'elif' block below represents a distinct step in the workflow triggered
|
@@ -356,7 +381,7 @@ def ai_workflow_chat(
|
|
356 |
if not repo_name:
|
357 |
history = add_bot_message(history, "Internal error: Repo name missing for creation. Resetting.")
|
358 |
yield (history, None, STATE_IDLE, "<p>Error creating space.</p>", "", "", 0,
|
359 |
-
None, None, None)
|
360 |
# No return needed
|
361 |
|
362 |
else:
|
@@ -369,38 +394,41 @@ def ai_workflow_chat(
|
|
369 |
state = STATE_GENERATING_CODE # Transition to the next state
|
370 |
# Yield updated state variables and history
|
371 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
372 |
-
attempts, app_desc, repo_name, generated_code)
|
373 |
# No return needed
|
374 |
|
375 |
except Exception as e:
|
376 |
history = add_bot_message(history, f"❌ Error creating space: {e}. Click 'reset'.")
|
377 |
# Yield error message and reset state on failure
|
378 |
yield (history, None, STATE_IDLE, "<p>Error creating space.</p>", "", "", 0,
|
379 |
-
None, None, None)
|
380 |
# No return needed
|
381 |
|
382 |
|
383 |
elif state == STATE_GENERATING_CODE:
|
384 |
# Define the prompt for Gemini based on the app description or a default
|
385 |
-
prompt_desc = app_desc if app_desc else 'a
|
386 |
prompt = f"""
|
387 |
You are an AI assistant specializing in Hugging Face Spaces using the {space_sdk} SDK.
|
388 |
Generate a full, single-file Python app based on:
|
389 |
'{prompt_desc}'
|
390 |
-
|
|
|
391 |
"""
|
392 |
try:
|
393 |
-
history = add_bot_message(history, "🧠 Generating `app.py` code with Gemini...")
|
394 |
# Yield to show message before the potentially time-consuming API call
|
395 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
396 |
-
attempts, app_desc, repo_name, generated_code)
|
397 |
|
398 |
-
# Perform the Gemini API call to generate code
|
399 |
-
code = call_gemini(prompt, gemini_api_key, gemini_model)
|
400 |
code = code.strip()
|
401 |
# Clean up common markdown code block formatting if present
|
402 |
if code.startswith("```python"):
|
403 |
code = code[len("```python"):].strip()
|
|
|
|
|
404 |
if code.endswith("```"):
|
405 |
code = code[:-len("```")].strip()
|
406 |
|
@@ -412,14 +440,14 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
412 |
generated_code = code # Store the generated code in the state variable for the next step
|
413 |
# Yield updated state variables and history
|
414 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
415 |
-
attempts, app_desc, repo_name, generated_code)
|
416 |
# No return needed
|
417 |
|
418 |
except Exception as e:
|
419 |
history = add_bot_message(history, f"❌ Error generating code: {e}. Click 'reset'.")
|
420 |
# Yield error message and reset state on failure
|
421 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
422 |
-
None, None, None)
|
423 |
# No return needed
|
424 |
|
425 |
|
@@ -429,14 +457,14 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
429 |
if not code_to_upload:
|
430 |
history = add_bot_message(history, "Internal error: No code to upload. Resetting.")
|
431 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
432 |
-
None, None, None)
|
433 |
# No return needed
|
434 |
|
435 |
else:
|
436 |
history = add_bot_message(history, "☁️ Uploading `app.py`...")
|
437 |
# Yield to show message before the upload action
|
438 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
439 |
-
attempts, app_desc, repo_name, generated_code)
|
440 |
|
441 |
try:
|
442 |
# Perform the file upload action
|
@@ -446,14 +474,14 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
446 |
generated_code = None # Clear the stored code after use to free memory/state space
|
447 |
# Yield updated state variables and history
|
448 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
449 |
-
attempts, app_desc, repo_name, generated_code)
|
450 |
# No return needed
|
451 |
|
452 |
except Exception as e:
|
453 |
-
history = add_bot_message(history, f"❌ Error uploading app.py
|
454 |
# Yield error message and reset state on failure
|
455 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
456 |
-
None, None, None)
|
457 |
# No return needed
|
458 |
|
459 |
|
@@ -461,27 +489,18 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
461 |
history = add_bot_message(history, "📄 Generating `requirements.txt`...")
|
462 |
# Yield to show message before generating requirements
|
463 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
464 |
-
attempts, app_desc, repo_name, generated_code)
|
465 |
|
466 |
# Logic to determine required packages based on SDK and keywords in the app description
|
467 |
reqs_list = ["gradio"] if space_sdk == "gradio" else ["streamlit"]
|
468 |
-
|
|
|
|
|
|
|
469 |
app_desc_lower = app_desc.lower()
|
470 |
-
if "google.generativeai" in app_desc_lower or "gemini" in app_desc_lower or gemini_api_key:
|
471 |
-
reqs_list.append("google-generativeai")
|
472 |
-
if "requests" in app_desc_lower:
|
473 |
-
reqs_list.append("requests")
|
474 |
-
# Add common libraries if description suggests they might be needed
|
475 |
-
if "image" in app_desc_lower or "upload" in app_desc_lower or "blur" in app_desc_lower or "vision" in app_desc_lower:
|
476 |
-
reqs_list.append("Pillow") # Pillow is a common image processing library
|
477 |
-
if "numpy" in app_desc_lower: reqs_list.append("numpy")
|
478 |
-
if "pandas" in app_desc_lower: reqs_list.append("pandas")
|
479 |
-
# Add scikit-image and opencv if image processing is heavily implied
|
480 |
if any(lib in app_desc_lower for lib in ["scikit-image", "skimage", "cv2", "opencv"]):
|
481 |
reqs_list.extend(["scikit-image", "opencv-python"]) # Note: opencv-python for pip
|
482 |
|
483 |
-
# Add essential libraries regardless of description keywords
|
484 |
-
reqs_list.append("huggingface_hub") # Needed for interaction helpers if used in app
|
485 |
|
486 |
# Use dict.fromkeys to get unique items while preserving insertion order (Python 3.7+)
|
487 |
reqs_list = list(dict.fromkeys(reqs_list))
|
@@ -493,7 +512,7 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
493 |
generated_code = reqs_content # Store requirements content
|
494 |
# Yield updated state variables and history
|
495 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
496 |
-
attempts, app_desc, repo_name, generated_code)
|
497 |
# No return needed
|
498 |
|
499 |
|
@@ -503,14 +522,14 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
503 |
if not reqs_content_to_upload:
|
504 |
history = add_bot_message(history, "Internal error: No requirements content to upload. Resetting.")
|
505 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
506 |
-
None, None, None)
|
507 |
# No return needed
|
508 |
|
509 |
else:
|
510 |
history = add_bot_message(history, "☁️ Uploading `requirements.txt`...")
|
511 |
# Yield to show message before upload
|
512 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
513 |
-
attempts, app_desc, repo_name, generated_code)
|
514 |
|
515 |
try:
|
516 |
# Perform requirements file upload
|
@@ -520,21 +539,21 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
520 |
generated_code = None # Clear content after use
|
521 |
# Yield updated state variables and history
|
522 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
523 |
-
attempts, app_desc, repo_name, generated_code)
|
524 |
# No return needed
|
525 |
|
526 |
except Exception as e:
|
527 |
-
history = add_bot_message(history, f"❌ Error uploading requirements.txt
|
528 |
# Yield error message and reset state on failure
|
529 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
530 |
-
None, None, None)
|
531 |
# No return needed
|
532 |
|
533 |
elif state == STATE_GENERATING_README:
|
534 |
history = add_bot_message(history, "📝 Generating `README.md`...")
|
535 |
# Yield message before generating README
|
536 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
537 |
-
attempts, app_desc, repo_name, generated_code)
|
538 |
|
539 |
# Generate simple README content with Space metadata header
|
540 |
readme_title = repo_name if repo_name else "My Awesome Space"
|
@@ -562,7 +581,7 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
562 |
generated_code = readme_content # Store README content
|
563 |
# Yield updated state variables and history
|
564 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
565 |
-
attempts, app_desc, repo_name, generated_code)
|
566 |
# No return needed
|
567 |
|
568 |
|
@@ -572,14 +591,14 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
572 |
if not readme_content_to_upload:
|
573 |
history = add_bot_message(history, "Internal error: No README content to upload. Resetting.")
|
574 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
575 |
-
None, None, None)
|
576 |
# No return needed
|
577 |
|
578 |
else:
|
579 |
history = add_bot_message(history, "☁️ Uploading `README.md`...")
|
580 |
# Yield message before upload
|
581 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
582 |
-
attempts, app_desc, repo_name, generated_code)
|
583 |
|
584 |
try:
|
585 |
# Perform README file upload
|
@@ -589,21 +608,21 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
589 |
generated_code = None # Clear content after use
|
590 |
# Yield updated state variables and history
|
591 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
592 |
-
attempts, app_desc, repo_name, generated_code)
|
593 |
# No return needed
|
594 |
|
595 |
except Exception as e:
|
596 |
-
history = add_bot_message(history, f"❌ Error uploading README.md
|
597 |
# Yield error message and reset state on failure
|
598 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
599 |
-
None, None, None)
|
600 |
# No return needed
|
601 |
|
602 |
elif state == STATE_CHECKING_LOGS_BUILD:
|
603 |
history = add_bot_message(history, "🔍 Fetching build logs...")
|
604 |
# Yield message before fetching logs (which includes a delay)
|
605 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
606 |
-
attempts, app_desc, repo_name, generated_code)
|
607 |
|
608 |
# Fetch build logs from HF Space
|
609 |
build_logs_text = get_build_logs_action(repo_id, hf_profile, hf_token)
|
@@ -615,7 +634,7 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
615 |
state = STATE_CHECKING_LOGS_RUN # Transition even on build error, to see if container starts
|
616 |
# Yield updated state, logs, and variables
|
617 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
618 |
-
attempts, app_desc, repo_name, generated_code)
|
619 |
# No return needed
|
620 |
|
621 |
else:
|
@@ -623,7 +642,7 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
623 |
state = STATE_CHECKING_LOGS_RUN # Transition to next log check
|
624 |
# Yield updated state, logs, and variables
|
625 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
626 |
-
attempts, app_desc, repo_name, generated_code)
|
627 |
# No return needed
|
628 |
|
629 |
|
@@ -631,7 +650,7 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
631 |
history = add_bot_message(history, "🔍 Fetching container logs...")
|
632 |
# Yield message before fetching logs (includes a delay)
|
633 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
634 |
-
attempts, app_desc, repo_name, generated_code)
|
635 |
|
636 |
# Fetch container logs from HF Space
|
637 |
container_logs_text = get_container_logs_action(repo_id, hf_profile, hf_token)
|
@@ -644,7 +663,7 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
644 |
state = STATE_DEBUGGING_CODE # Transition to the debugging state
|
645 |
# Yield updated state, logs, attempts, and variables
|
646 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
647 |
-
attempts, app_desc, repo_name, generated_code)
|
648 |
# No return needed
|
649 |
|
650 |
elif ("error" in updated_run.lower() or "exception" in updated_run.lower()) and attempts >= MAX_DEBUG_ATTEMPTS:
|
@@ -653,7 +672,7 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
653 |
state = STATE_COMPLETE # Workflow ends on failure after attempts
|
654 |
# Yield updated state, logs, attempts, and variables
|
655 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
656 |
-
attempts, app_desc, repo_name, generated_code)
|
657 |
# No return needed
|
658 |
|
659 |
else:
|
@@ -662,7 +681,7 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
662 |
state = STATE_COMPLETE # Workflow ends on success
|
663 |
# Yield updated state, logs, attempts, and variables
|
664 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
665 |
-
attempts, app_desc, repo_name, generated_code)
|
666 |
# No return needed
|
667 |
|
668 |
|
@@ -670,26 +689,28 @@ This Space was automatically generated by an AI workflow using Google Gemini and
|
|
670 |
history = add_bot_message(history, f"🧠 Calling Gemini to generate fix based on logs...")
|
671 |
# Yield message before Gemini API call
|
672 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
673 |
-
attempts, app_desc, repo_name, generated_code)
|
674 |
|
675 |
# Construct prompt for Gemini including the container logs
|
676 |
debug_prompt = f"""
|
677 |
You are debugging a {space_sdk} Space. The goal is to fix the code in `app.py` based on the container logs provided.
|
678 |
|
679 |
Here are the container logs:
|
680 |
-
Use code with caution.
|
681 |
-
Python
|
682 |
{updated_run}
|
683 |
Generate the *complete, fixed* content for `app.py` based on these logs.
|
684 |
Return **only** the python code block for app.py. Do not include any extra text, explanations, or markdown outside the code block.
|
685 |
"""
|
686 |
try:
|
687 |
-
# Call Gemini to generate the corrected code
|
688 |
-
|
|
|
|
|
689 |
fix_code = fix_code.strip()
|
690 |
# Clean up potential markdown formatting
|
691 |
if fix_code.startswith("```python"):
|
692 |
fix_code = fix_code[len("```python"):].strip()
|
|
|
|
|
693 |
if fix_code.endswith("```"):
|
694 |
fix_code = fix_code[:-len("```")].strip()
|
695 |
|
@@ -701,14 +722,14 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
701 |
generated_code = fix_code # Store the generated fix code
|
702 |
# Yield updated state, code, and variables
|
703 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
704 |
-
attempts, app_desc, repo_name, generated_code)
|
705 |
# No return needed
|
706 |
|
707 |
except Exception as e:
|
708 |
history = add_bot_message(history, f"❌ Error generating debug code: {e}. Click 'reset'.")
|
709 |
# Yield error message and reset state on failure
|
710 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
711 |
-
None, None, None)
|
712 |
# No return needed
|
713 |
|
714 |
elif state == STATE_UPLOADING_FIXED_APP_PY:
|
@@ -717,14 +738,14 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
717 |
if not fixed_code_to_upload:
|
718 |
history = add_bot_message(history, "Internal error: No fixed code available to upload. Resetting.")
|
719 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
720 |
-
None, None, None)
|
721 |
# No return needed
|
722 |
|
723 |
else:
|
724 |
history = add_bot_message(history, "☁️ Uploading fixed `app.py`...")
|
725 |
# Yield message before upload
|
726 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
727 |
-
attempts, app_desc, repo_name, generated_code)
|
728 |
|
729 |
try:
|
730 |
# Perform the upload of the fixed app.py
|
@@ -734,21 +755,21 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
734 |
generated_code = None # Clear code after use
|
735 |
# Yield updated state, code, and variables
|
736 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
737 |
-
attempts, app_desc, repo_name, generated_code)
|
738 |
# No return needed
|
739 |
|
740 |
except Exception as e:
|
741 |
-
history = add_bot_message(history, f"❌ Error uploading fixed app.py
|
742 |
# Yield error message and reset state on failure
|
743 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
744 |
-
None, None, None)
|
745 |
# No return needed
|
746 |
|
747 |
elif state == STATE_COMPLETE:
|
748 |
# If in the complete state, the workflow is finished for this project.
|
749 |
# Subsequent clicks just add user messages; we simply yield the current state.
|
750 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
751 |
-
attempts, app_desc, repo_name, generated_code)
|
752 |
# No return needed
|
753 |
|
754 |
|
@@ -759,7 +780,7 @@ Return **only** the python code block for app.py. Do not include any extra text,
|
|
759 |
print(f"Critical Error in state {state}: {e}") # Log the error for debugging purposes
|
760 |
# Yield an error state and reset essential workflow variables on critical failure
|
761 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
762 |
-
None, None, None)
|
763 |
# No return needed after yield
|
764 |
|
765 |
|
@@ -769,7 +790,8 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
# Gradio State variables - these persist their values across user interactions (clicks)
hf_profile = gr.State(None)
hf_token = gr.State(None)
- gemini_key
gemini_model = gr.State("gemini-1.5-flash") # Default selected model
repo_id = gr.State(None) # Stores the ID of the created Space
workflow = gr.State(STATE_IDLE) # Stores the current state of the AI workflow
@@ -778,6 +800,8 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
|
|
778 |
app_description = gr.State(None) # Stores the user's initial description of the desired app
|
779 |
repo_name_state = gr.State(None) # Stores the chosen repository name for the Space
|
780 |
generated_code_state = gr.State(None) # Temporary storage for generated file content (app.py, reqs, README)
|
|
|
|
|
781 |
|
782 |
with gr.Row():
|
783 |
# Sidebar column for inputs and status displays
|
@@ -788,21 +812,22 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
login_btn = gr.LoginButton(variant="huggingface")

# Initial load event to check login status (if cached)
ai_builder_tab.load(show_profile, outputs=login_status)
# Update status display when login button reports success
login_btn.click(show_profile, outputs=login_status)

- gr.Markdown("## Google AI Studio
# Textbox for Gemini API key. Read from environment variable if available.
gemini_input = gr.Textbox(
label="API Key",
type="password", # Hides input for security
interactive=True,
- value=os.environ.get("GOOGLE_API_KEY") # Pre-fill if GOOGLE_API_KEY env var is set
)
gemini_status = gr.Markdown("") # Display Gemini configuration status

- gr.Markdown("## Gemini Model")
# Radio buttons to select the Gemini model
model_selector = gr.Radio(
choices=[
@@ -816,12 +841,26 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
)

# Configure Gemini status on initial load (if API key env var is set)
ai_builder_tab.load(
configure_gemini,
inputs=[gemini_key, gemini_model],
outputs=[gemini_status]
)

gr.Markdown("## Space SDK")
# Radio buttons to select the Space SDK (Gradio or Streamlit)
sdk_selector = gr.Radio(choices=["gradio","streamlit"], value="gradio", label="Template SDK", interactive=True)
@@ -876,10 +915,10 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
status_str = "✅ Ready to send commands."
else:
status_str = " ".join(status_parts)
-
status_str = "Checking prerequisites..."

-
# gr.update is used to dynamically change a component's properties
return gr.update(interactive=is_ready), status_str
@@ -958,14 +997,16 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
gemini_key, gemini_model, # Gemini State variables
repo_id, workflow, sdk_state, # Workflow State variables
iframe, run_txt, build_txt, # UI outputs whose current values are needed by the generator
- debug_attempts, app_description, repo_name_state, generated_code_state # Other State variables
],
# Outputs are updated by the values yielded from the generator
outputs=[
chatbot, # Update Chatbot with new messages
repo_id, workflow, # Update workflow State variables
iframe, run_txt, build_txt, # Update UI outputs
- debug_attempts, app_description, repo_name_state, generated_code_state # Update other State variables
]
).success( # Chain a .success() event to run *after* the .click() handler completes without error
# Clear the user input textbox after the message is sent and processed
@@ -1006,4 +1047,4 @@ if __name__ == "__main__":
os.makedirs(os.environ["GRADIO_TEMP_DIR"], exist_ok=True) # Ensure the directory exists

# Launch the Gradio UI
- ai_builder_tab.launch()
|
|
|
7 |
|
8 |
import gradio as gr
|
9 |
import google.generativeai as genai
|
10 |
+
from google.generativeai import types # Import types for configuration and tools
|
11 |
|
12 |
from huggingface_hub import create_repo, list_models, upload_file, constants
|
13 |
from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
|
|
|
54 |
iframe = f'<iframe src="{url}" width="100%" height="500px"></iframe>'
|
55 |
return repo_id, iframe
|
56 |
except Exception as e:
|
57 |
+
raise RuntimeError(f"Failed to create Space `{repo_id}`: {e}")
|
58 |
|
59 |
def upload_file_to_space_action(
|
60 |
file_obj: io.StringIO, # Specify type hint for clarity
|
|
|
75 |
repo_type="space"
|
76 |
)
|
77 |
except Exception as e:
|
78 |
+
raise RuntimeError(f"Failed to upload `{path_in_repo}` to `{repo_id}`: {e}")
|
79 |
|
80 |
def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
|
81 |
"""Fetches build or run logs for a Space."""
|
|
|
83 |
return f"Cannot fetch {level} logs: repo_id or token missing."
|
84 |
jwt_url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt"
|
85 |
try:
|
86 |
+
r = get_session().get(jwt_url, headers=build_hf_headers(token=token))
|
87 |
hf_raise_for_status(r) # Raise HTTPError for bad responses (4xx or 5xx)
|
88 |
+
jwt = r.json()["token"]
|
89 |
logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
|
90 |
lines, count = [], 0
|
91 |
# Using stream=True is good for potentially large logs
|
|
|
100 |
payload = raw[len(b"data: "):]
|
101 |
try:
|
102 |
event = json.loads(payload.decode())
|
103 |
+
ts = event.get("timestamp", "")
|
104 |
+
txt = event.get("data", "").strip()
|
105 |
if txt:
|
106 |
lines.append(f"[{ts}] {txt}")
|
107 |
count += 1
|
|
|
111 |
return "\n".join(lines) if lines else f"No {level} logs found."
|
112 |
except Exception as e:
|
113 |
# Catching generic exception is acceptable for helper functions
|
114 |
+
return f"Error fetching {level} logs for `{repo_id}`: {e}"
|
115 |
|
116 |
|
117 |
def get_build_logs_action(repo_id, profile, token):
|
|
|
131 |
return _fetch_space_logs_level(repo_id, "run", token.token)
|
132 |
|
133 |
|
134 |
+
# --- Google Gemini integration with model selection and grounding ---
|
135 |
|
136 |
def configure_gemini(api_key: str | None, model_name: str | None) -> str:
|
137 |
"""Configures the Gemini API and checks if the model is accessible."""
|
138 |
if not api_key:
|
139 |
+
return "⚠️ Gemini API key is not set."
|
140 |
if not model_name:
|
141 |
+
return "⚠️ Please select a Gemini model."
|
142 |
try:
|
143 |
genai.configure(api_key=api_key)
|
144 |
# Attempt a simple call to verify credentials and model availability
|
145 |
# This will raise an exception if the key is invalid or model not found
|
146 |
genai.GenerativeModel(model_name).generate_content("ping", stream=False)
|
147 |
+
return f"✅ Gemini configured successfully with **{model_name}**."
|
148 |
except Exception as e:
|
149 |
+
return f"❌ Error configuring Gemini: {e}"
|
150 |
|
151 |
+
def call_gemini(prompt: str, api_key: str, model_name: str, use_grounding: bool = False) -> str:
|
152 |
+
"""Calls the Gemini API with a given prompt, optionally using grounding."""
|
153 |
if not api_key or not model_name:
|
154 |
raise ValueError("Gemini API key or model not set.")
|
155 |
try:
|
156 |
genai.configure(api_key=api_key)
|
157 |
model = genai.GenerativeModel(model_name)
|
158 |
+
|
159 |
+
# Define tools for grounding if requested.
|
160 |
+
# Using genai.types.GoogleSearch() is recommended for Gemini 2.0+
|
161 |
+
# and is backwards compatible with 1.5 for retrieval.
|
162 |
+
tools_config = [types.Tool(google_search=types.GoogleSearch())] if use_grounding else None
|
163 |
+
|
164 |
# Using generate_content and stream=False for simplicity here
|
165 |
+
response = model.generate_content(
|
166 |
+
prompt,
|
167 |
+
stream=False,
|
168 |
+
tools=tools_config # Pass the tools configuration
|
169 |
+
)
|
170 |
+
# Check if response is blocked
|
171 |
+
if response.prompt_feedback and response.prompt_feedback.block_reason:
|
172 |
+
raise RuntimeError(f"Gemini API call blocked: {response.prompt_feedback.block_reason}")
|
173 |
+
if not response.candidates:
|
174 |
+
raise RuntimeError("Gemini API call returned no candidates.")
|
175 |
+
|
176 |
+
# If response.candidates is not empty, get the text
|
177 |
+
# Using response.text is a convenient way to get text from the first candidate part
|
178 |
return response.text or "" # Return empty string if no text
|
179 |
+
|
180 |
except Exception as e:
|
181 |
# Re-raising as RuntimeError for the workflow to catch and manage
|
182 |
raise RuntimeError(f"Gemini API call failed: {e}")
|
|
|
225 |
app_description_state: str | None,
|
226 |
repo_name_state: str | None,
|
227 |
generated_code_state: str | None,
|
228 |
+
use_grounding_state: bool, # New input for grounding state
|
229 |
# Absorb potential extra args passed by Gradio event listeners (e.g. old value, event data)
|
230 |
*args,
|
231 |
**kwargs
|
|
|
240 |
str | None, # 7: Updated app description
|
241 |
str | None, # 8: Updated repo name
|
242 |
str | None, # 9: Updated generated code (for temporary storage)
|
243 |
+
bool, # 10: Updated use_grounding_state (must return all state vars passed in)
|
244 |
]:
|
245 |
"""
|
246 |
Generator function to handle the AI workflow state machine.
|
|
|
253 |
app_desc = app_description_state
|
254 |
repo_name = repo_name_state
|
255 |
generated_code = generated_code_state
|
256 |
+
use_grounding = use_grounding_state # Unpack grounding state
|
257 |
|
258 |
# Keep copies of potentially updated UI elements passed as inputs to update them later
|
259 |
updated_preview = preview_html
|
|
|
269 |
|
270 |
# Yield immediately to update the chat UI with the user's message
|
271 |
# This provides immediate feedback to the user while the AI processes
|
272 |
+
# Ensure all state variables are yielded back
|
273 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
274 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
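Every yield in this handler must supply one value per component listed in the event's outputs, in the same order, which is why use_grounding is now threaded through all of them. A minimal, self-contained sketch of that generator pattern (component and function names here are illustrative, not from this app):
import gradio as gr

def stepper(msg, count):
    count = (count or 0) + 1
    yield f"Received: {msg}", count         # first yield updates the UI immediately
    yield f"Done processing: {msg}", count  # later yields overwrite the same outputs

with gr.Blocks() as demo:
    counter = gr.State(0)
    box_in = gr.Textbox(label="Message")
    box_out = gr.Textbox(label="Status")
    # outputs=[box_out, counter] must match the (text, count) tuples yielded above.
    box_in.submit(stepper, inputs=[box_in, counter], outputs=[box_out, counter])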
|
275 |
|
276 |
try:
|
277 |
# --- State Machine Logic based on the current 'state' variable ---
|
|
|
282 |
history = add_bot_message(history, "Workflow paused: Please log in to Hugging Face first.")
|
283 |
# Yield updated history and current state, then exit for this click
|
284 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
285 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
|
286 |
return # Exit the generator for this click
|
287 |
|
288 |
if not (gemini_api_key and gemini_model):
|
289 |
history = add_bot_message(history, "Workflow paused: Please enter your API key and select a Gemini model.")
|
290 |
# Yield updated history and current state, then exit for this click
|
291 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
292 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
|
293 |
return # Exit the generator for this click
|
294 |
|
295 |
# Look for specific commands in the user's message
|
|
|
304 |
history = add_bot_message(history, "Workflow reset.")
|
305 |
# Yield updated history and reset state variables to their initial values
|
306 |
yield (history, None, STATE_IDLE, "<p>No Space created yet.</p>", "", "", 0,
|
307 |
+
None, None, None, use_grounding) # Include use_grounding
|
308 |
# No return needed after yield in this generator pattern; execution for this click ends here.
|
309 |
|
310 |
elif generate_match:
|
|
|
318 |
app_desc = new_app_desc
|
319 |
# Yield updated history and state variables
|
320 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
321 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
|
322 |
# No return needed
|
323 |
|
324 |
elif create_match:
|
|
|
330 |
repo_name = new_repo_name
|
331 |
# Yield updated history and state variables
|
332 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
333 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
|
334 |
# No return needed
|
335 |
|
336 |
elif "create" in message.lower() and not repo_id:
|
|
|
339 |
state = STATE_AWAITING_REPO_NAME # Transition to the state where we wait for the name
|
340 |
# Yield updated history and state
|
341 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
342 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
|
343 |
# No return needed
|
344 |
|
345 |
else:
|
|
|
347 |
history = add_bot_message(history, "Command not recognized. Try 'generate me a gradio app called myapp', or 'reset'.")
|
348 |
# Yield updated history and current state
|
349 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
350 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
|
351 |
# No return needed
|
352 |
|
353 |
|
|
|
355 |
# User's message is expected to be the repo name
|
356 |
new_repo_name = message.strip()
|
357 |
# Basic validation for Hugging Face repo name format
|
358 |
+
# Allow letters, numbers, hyphens, underscores, max 100 chars (HF limit check)
|
359 |
+
if not new_repo_name or re.search(r'[^a-zA-Z0-9_-]', new_repo_name) or len(new_repo_name) > 100:
|
360 |
+
history = add_bot_message(history, "Invalid name. Please provide a single word/slug for the Space name (letters, numbers, underscores, hyphens only, max 100 chars).")
|
361 |
# Stay in AWAITING_REPO_NAME state and yield message
|
362 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
363 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
|
364 |
# No return needed
|
365 |
|
366 |
else:
|
|
|
370 |
# Yield updated history, state, and repo name.
|
371 |
# The next click will proceed from the STATE_CREATING_SPACE block.
|
372 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
373 |
+
attempts, app_desc, repo_name, generated_code, use_grounding)
|
374 |
# No return needed
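The validation rule above can be exercised in isolation; this small helper (its name is illustrative) mirrors the same character class and length limit:
import re

def is_valid_space_name(name: str) -> bool:
    # Letters, digits, hyphens and underscores only, at most 100 characters.
    return bool(name) and not re.search(r'[^a-zA-Z0-9_-]', name) and len(name) <= 100

assert is_valid_space_name("my-app_2")
assert not is_valid_space_name("my app")    # spaces rejected
assert not is_valid_space_name("app/name")  # slashes rejected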
|
375 |
|
376 |
# Note: Each 'elif' block below represents a distinct step in the workflow triggered
|
|
|
381 |
if not repo_name:
|
382 |
history = add_bot_message(history, "Internal error: Repo name missing for creation. Resetting.")
|
383 |
yield (history, None, STATE_IDLE, "<p>Error creating space.</p>", "", "", 0,
|
384 |
+
None, None, None, use_grounding) # Include use_grounding
|
385 |
# No return needed
|
386 |
|
387 |
else:
|
|
|
394 |
state = STATE_GENERATING_CODE # Transition to the next state
|
395 |
# Yield updated state variables and history
|
396 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
397 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
398 |
# No return needed
|
399 |
|
400 |
except Exception as e:
|
401 |
history = add_bot_message(history, f"❌ Error creating space: {e}. Click 'reset'.")
|
402 |
# Yield error message and reset state on failure
|
403 |
yield (history, None, STATE_IDLE, "<p>Error creating space.</p>", "", "", 0,
|
404 |
+
None, None, None, use_grounding) # Include use_grounding
|
405 |
# No return needed
|
406 |
|
407 |
|
408 |
elif state == STATE_GENERATING_CODE:
|
409 |
# Define the prompt for Gemini based on the app description or a default
|
410 |
+
prompt_desc = app_desc if app_desc else f'a simple {space_sdk} app'
|
411 |
prompt = f"""
|
412 |
You are an AI assistant specializing in Hugging Face Spaces using the {space_sdk} SDK.
|
413 |
Generate a full, single-file Python app based on:
|
414 |
'{prompt_desc}'
|
415 |
+
Ensure the code is runnable as `app.py` in a Hugging Face Space using the `{space_sdk}` SDK. Include necessary imports and setup.
|
416 |
+
Return **only** the python code block for `app.py`. Do not include any extra text, explanations, or markdown outside the code block.
|
417 |
"""
|
418 |
try:
|
419 |
+
history = add_bot_message(history, f"🧠 Generating `{prompt_desc}` `{space_sdk}` app (`app.py`) code with Gemini...")
|
420 |
# Yield to show message before the potentially time-consuming API call
|
421 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
422 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
423 |
|
424 |
+
# Perform the Gemini API call to generate code, optionally using grounding
|
425 |
+
code = call_gemini(prompt, gemini_api_key, gemini_model, use_grounding=use_grounding)
|
426 |
code = code.strip()
|
427 |
# Clean up common markdown code block formatting if present
|
428 |
if code.startswith("```python"):
|
429 |
code = code[len("```python"):].strip()
|
430 |
+
if code.startswith("```"): # Handle generic code blocks too
|
431 |
+
code = code[len("```"):].strip()
|
432 |
if code.endswith("```"):
|
433 |
code = code[:-len("```")].strip()
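The fence-stripping steps above could be folded into one small helper reused by both the generation and debugging paths; this refactor is illustrative, not part of the diff:
def strip_code_fences(code: str) -> str:
    code = code.strip()
    if code.startswith("```python"):
        code = code[len("```python"):].strip()
    elif code.startswith("```"):  # generic fence without a language tag
        code = code[len("```"):].strip()
    if code.endswith("```"):
        code = code[:-len("```")].strip()
    return code

assert strip_code_fences("```python\nprint('hi')\n```") == "print('hi')"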
|
434 |
|
|
|
440 |
generated_code = code # Store the generated code in the state variable for the next step
|
441 |
# Yield updated state variables and history
|
442 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
443 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
444 |
# No return needed
|
445 |
|
446 |
except Exception as e:
|
447 |
history = add_bot_message(history, f"❌ Error generating code: {e}. Click 'reset'.")
|
448 |
# Yield error message and reset state on failure
|
449 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
450 |
+
None, None, None, use_grounding) # Include use_grounding
|
451 |
# No return needed
|
452 |
|
453 |
|
|
|
457 |
if not code_to_upload:
|
458 |
history = add_bot_message(history, "Internal error: No code to upload. Resetting.")
|
459 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
460 |
+
None, None, None, use_grounding) # Include use_grounding
|
461 |
# No return needed
|
462 |
|
463 |
else:
|
464 |
history = add_bot_message(history, "☁️ Uploading `app.py`...")
|
465 |
# Yield to show message before the upload action
|
466 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
467 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
468 |
|
469 |
try:
|
470 |
# Perform the file upload action
|
|
|
474 |
generated_code = None # Clear the stored code after use to free memory/state space
|
475 |
# Yield updated state variables and history
|
476 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
477 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
478 |
# No return needed
|
479 |
|
480 |
except Exception as e:
|
481 |
+
history = add_bot_message(history, f"❌ Error uploading `app.py`: {e}. Click 'reset'.")
|
482 |
# Yield error message and reset state on failure
|
483 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
484 |
+
None, None, None, use_grounding) # Include use_grounding
|
485 |
# No return needed
|
486 |
|
487 |
|
|
|
489 |
history = add_bot_message(history, "📄 Generating `requirements.txt`...")
|
490 |
# Yield to show message before generating requirements
|
491 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
492 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
493 |
|
494 |
# Logic to determine required packages based on SDK and keywords in the app description
|
495 |
reqs_list = ["gradio"] if space_sdk == "gradio" else ["streamlit"]
|
496 |
+
# Add essential libraries regardless of description keywords or grounding
|
497 |
+
reqs_list.extend(["google-generativeai", "huggingface_hub", "Pillow", "numpy", "pandas", "requests"]) # Assume common ones might be needed
|
498 |
+
# Add scikit-image and opencv if description implies image processing
|
499 |
+
if app_desc:
|
500 |
app_desc_lower = app_desc.lower()
|
|
501 |
if any(lib in app_desc_lower for lib in ["scikit-image", "skimage", "cv2", "opencv"]):
|
502 |
reqs_list.extend(["scikit-image", "opencv-python"]) # Note: opencv-python for pip
|
503 |
|
|
|
|
|
504 |
|
505 |
# Use dict.fromkeys to get unique items while preserving insertion order (Python 3.7+)
|
506 |
reqs_list = list(dict.fromkeys(reqs_list))
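For reference, dict.fromkeys keeps the first occurrence of each package and preserves insertion order, so duplicates collapse cleanly:
reqs = ["gradio", "google-generativeai", "requests", "gradio", "numpy", "requests"]
print(list(dict.fromkeys(reqs)))  # ['gradio', 'google-generativeai', 'requests', 'numpy']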
|
|
|
512 |
generated_code = reqs_content # Store requirements content
|
513 |
# Yield updated state variables and history
|
514 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
515 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
516 |
# No return needed
|
517 |
|
518 |
|
|
|
522 |
if not reqs_content_to_upload:
|
523 |
history = add_bot_message(history, "Internal error: No requirements content to upload. Resetting.")
|
524 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
525 |
+
None, None, None, use_grounding) # Include use_grounding
|
526 |
# No return needed
|
527 |
|
528 |
else:
|
529 |
history = add_bot_message(history, "☁️ Uploading `requirements.txt`...")
|
530 |
# Yield to show message before upload
|
531 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
532 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
533 |
|
534 |
try:
|
535 |
# Perform requirements file upload
|
|
|
539 |
generated_code = None # Clear content after use
|
540 |
# Yield updated state variables and history
|
541 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
542 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
543 |
# No return needed
|
544 |
|
545 |
except Exception as e:
|
546 |
+
history = add_bot_message(history, f"❌ Error uploading `requirements.txt`: {e}. Click 'reset'.")
|
547 |
# Yield error message and reset state on failure
|
548 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
549 |
+
None, None, None, use_grounding) # Include use_grounding
|
550 |
# No return needed
|
551 |
|
552 |
elif state == STATE_GENERATING_README:
|
553 |
history = add_bot_message(history, "📝 Generating `README.md`...")
|
554 |
# Yield message before generating README
|
555 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
556 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
557 |
|
558 |
# Generate simple README content with Space metadata header
|
559 |
readme_title = repo_name if repo_name else "My Awesome Space"
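The metadata header mentioned here is the YAML front matter Hugging Face Spaces read from README.md; the exact fields this workflow emits are not visible in this hunk, so the block below is a typical, assumed shape with placeholder values:
readme_title = "my-awesome-space"  # placeholder; the workflow uses the chosen repo name
space_sdk = "gradio"
readme_content = f"""---
title: {readme_title}
emoji: 🤖
colorFrom: blue
colorTo: purple
sdk: {space_sdk}
app_file: app.py
pinned: false
---

# {readme_title}

This Space was automatically generated by an AI workflow using Google Gemini and Gradio.
"""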
|
|
|
581 |
generated_code = readme_content # Store README content
|
582 |
# Yield updated state variables and history
|
583 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
584 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
585 |
# No return needed
|
586 |
|
587 |
|
|
|
591 |
if not readme_content_to_upload:
|
592 |
history = add_bot_message(history, "Internal error: No README content to upload. Resetting.")
|
593 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
594 |
+
None, None, None, use_grounding) # Include use_grounding
|
595 |
# No return needed
|
596 |
|
597 |
else:
|
598 |
history = add_bot_message(history, "☁️ Uploading `README.md`...")
|
599 |
# Yield message before upload
|
600 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
601 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
602 |
|
603 |
try:
|
604 |
# Perform README file upload
|
|
|
608 |
generated_code = None # Clear content after use
|
609 |
# Yield updated state variables and history
|
610 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
611 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
612 |
# No return needed
|
613 |
|
614 |
except Exception as e:
|
615 |
+
history = add_bot_message(history, f"❌ Error uploading `README.md`: {e}. Click 'reset'.")
|
616 |
# Yield error message and reset state on failure
|
617 |
yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
|
618 |
+
None, None, None, use_grounding) # Include use_grounding
|
619 |
# No return needed
|
620 |
|
621 |
elif state == STATE_CHECKING_LOGS_BUILD:
|
622 |
history = add_bot_message(history, "🔍 Fetching build logs...")
|
623 |
# Yield message before fetching logs (which includes a delay)
|
624 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
625 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
626 |
|
627 |
# Fetch build logs from HF Space
|
628 |
build_logs_text = get_build_logs_action(repo_id, hf_profile, hf_token)
|
|
|
634 |
state = STATE_CHECKING_LOGS_RUN # Transition even on build error, to see if container starts
|
635 |
# Yield updated state, logs, and variables
|
636 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
637 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
638 |
# No return needed
|
639 |
|
640 |
else:
|
|
|
642 |
state = STATE_CHECKING_LOGS_RUN # Transition to next log check
|
643 |
# Yield updated state, logs, and variables
|
644 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
645 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
646 |
# No return needed
|
647 |
|
648 |
|
|
|
650 |
history = add_bot_message(history, "🔍 Fetching container logs...")
|
651 |
# Yield message before fetching logs (includes a delay)
|
652 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
653 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
654 |
|
655 |
# Fetch container logs from HF Space
|
656 |
container_logs_text = get_container_logs_action(repo_id, hf_profile, hf_token)
|
|
|
663 |
state = STATE_DEBUGGING_CODE # Transition to the debugging state
|
664 |
# Yield updated state, logs, attempts, and variables
|
665 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
666 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
667 |
# No return needed
|
668 |
|
669 |
elif ("error" in updated_run.lower() or "exception" in updated_run.lower()) and attempts >= MAX_DEBUG_ATTEMPTS:
|
|
|
672 |
state = STATE_COMPLETE # Workflow ends on failure after attempts
|
673 |
# Yield updated state, logs, attempts, and variables
|
674 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
675 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
676 |
# No return needed
|
677 |
|
678 |
else:
|
|
|
681 |
state = STATE_COMPLETE # Workflow ends on success
|
682 |
# Yield updated state, logs, attempts, and variables
|
683 |
yield (history, repo_id, state, updated_preview, updated_run, updated_build,
|
684 |
+
attempts, app_desc, repo_name, generated_code, use_grounding) # Include use_grounding
|
685 |
# No return needed
|
686 |
|
687 |
|
            history = add_bot_message(history, f"🧠 Calling Gemini to generate fix based on logs...")
            # Yield message before Gemini API call
            yield (history, repo_id, state, updated_preview, updated_run, updated_build,
+                  attempts, app_desc, repo_name, generated_code, use_grounding)  # Include use_grounding

            # Construct prompt for Gemini including the container logs
            debug_prompt = f"""
You are debugging a {space_sdk} Space. The goal is to fix the code in `app.py` based on the container logs provided.

Here are the container logs:

{updated_run}
Generate the *complete, fixed* content for `app.py` based on these logs.
Return **only** the python code block for app.py. Do not include any extra text, explanations, or markdown outside the code block.
"""
            try:
+               # Call Gemini to generate the corrected code, optionally using grounding
+               # Note: Grounding might be less effective for debugging based *only* on logs,
+               # but we include the option as requested.
+               fix_code = call_gemini(debug_prompt, gemini_api_key, gemini_model, use_grounding=use_grounding)
                fix_code = fix_code.strip()
                # Clean up potential markdown formatting
                if fix_code.startswith("```python"):
                    fix_code = fix_code[len("```python"):].strip()
+               if fix_code.startswith("```"):
+                   fix_code = fix_code[len("```"):].strip()
                if fix_code.endswith("```"):
                    fix_code = fix_code[:-len("```")].strip()

                generated_code = fix_code  # Store the generated fix code
                # Yield updated state, code, and variables
                yield (history, repo_id, state, updated_preview, updated_run, updated_build,
+                      attempts, app_desc, repo_name, generated_code, use_grounding)  # Include use_grounding
                # No return needed

            except Exception as e:
                history = add_bot_message(history, f"❌ Error generating debug code: {e}. Click 'reset'.")
                # Yield error message and reset state on failure
                yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
+                      None, None, None, use_grounding)  # Include use_grounding
                # No return needed

        elif state == STATE_UPLOADING_FIXED_APP_PY:

            if not fixed_code_to_upload:
                history = add_bot_message(history, "Internal error: No fixed code available to upload. Resetting.")
                yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
+                      None, None, None, use_grounding)  # Include use_grounding
                # No return needed

            else:
                history = add_bot_message(history, "☁️ Uploading fixed `app.py`...")
                # Yield message before upload
                yield (history, repo_id, state, updated_preview, updated_run, updated_build,
+                      attempts, app_desc, repo_name, generated_code, use_grounding)  # Include use_grounding

                try:
                    # Perform the upload of the fixed app.py

                    generated_code = None  # Clear code after use
                    # Yield updated state, code, and variables
                    yield (history, repo_id, state, updated_preview, updated_run, updated_build,
+                          attempts, app_desc, repo_name, generated_code, use_grounding)  # Include use_grounding
                    # No return needed

                except Exception as e:
+                   history = add_bot_message(history, f"❌ Error uploading fixed `app.py`: {e}. Click 'reset'.")
                    # Yield error message and reset state on failure
                    yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
+                          None, None, None, use_grounding)  # Include use_grounding
                    # No return needed

        elif state == STATE_COMPLETE:
            # If in the complete state, the workflow is finished for this project.
            # Subsequent clicks just add user messages; we simply yield the current state.
            yield (history, repo_id, state, updated_preview, updated_run, updated_build,
+                  attempts, app_desc, repo_name, generated_code, use_grounding)  # Include use_grounding
            # No return needed


        print(f"Critical Error in state {state}: {e}")  # Log the error for debugging purposes
        # Yield an error state and reset essential workflow variables on critical failure
        yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0,
+              None, None, None, use_grounding)  # Include use_grounding
        # No return needed after yield

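# Illustrative sketch (not part of this commit): the call_gemini(...) helper used in the
# debugging state above is defined earlier in app.py. A minimal version of its new
# use_grounding flag could look like the following; the tools="google_search_retrieval"
# value is an assumption about the google-generativeai SDK for Gemini 1.5 models, and the
# fence-stripping regex mirrors the manual cleanup done above.
import re
import google.generativeai as genai

def call_gemini_sketch(prompt: str, api_key: str, model_name: str, use_grounding: bool = False) -> str:
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(
        model_name,
        tools="google_search_retrieval" if use_grounding else None,  # assumed SDK option
    )
    text = model.generate_content(prompt).text
    # Strip one wrapping Markdown fence (```python ... ``` or ``` ... ```), if present.
    match = re.match(r"^```(?:python)?\s*(.*?)\s*```$", text.strip(), re.DOTALL)
    return match.group(1) if match else text.strip()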
    # Gradio State variables - these persist their values across user interactions (clicks)
    hf_profile = gr.State(None)
    hf_token = gr.State(None)
+   # FIX: Initialize gemini_key state from env var on load
+   gemini_key = gr.State(os.environ.get("GOOGLE_API_KEY"))
    gemini_model = gr.State("gemini-1.5-flash")  # Default selected model
    repo_id = gr.State(None)  # Stores the ID of the created Space
    workflow = gr.State(STATE_IDLE)  # Stores the current state of the AI workflow

    app_description = gr.State(None)  # Stores the user's initial description of the desired app
    repo_name_state = gr.State(None)  # Stores the chosen repository name for the Space
    generated_code_state = gr.State(None)  # Temporary storage for generated file content (app.py, reqs, README)
+   # New State variable for grounding checkbox
+   use_grounding_state = gr.State(False)

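    # Note (editor sketch, not from this commit): gr.State(os.environ.get("GOOGLE_API_KEY"))
    # reads the environment variable once, when this Blocks UI is constructed; each user
    # session then starts from a copy of that default value.
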
    with gr.Row():
        # Sidebar column for inputs and status displays

            login_btn = gr.LoginButton(variant="huggingface")

            # Initial load event to check login status (if cached)
+           # This also ensures the gemini_key state is potentially read from env var
            ai_builder_tab.load(show_profile, outputs=login_status)
            # Update status display when login button reports success
            login_btn.click(show_profile, outputs=login_status)

+           gr.Markdown("## Google AI Studio / Gemini")
            # Textbox for Gemini API key. Read from environment variable if available.
            gemini_input = gr.Textbox(
                label="API Key",
                type="password",  # Hides input for security
                interactive=True,
+               value=os.environ.get("GOOGLE_API_KEY"),  # Pre-fill if GOOGLE_API_KEY env var is set
+               info="Get your key from Google AI Studio"
            )
            gemini_status = gr.Markdown("")  # Display Gemini configuration status

            # Radio buttons to select the Gemini model
            model_selector = gr.Radio(
                choices=[

            )

            # Configure Gemini status on initial load (if API key env var is set)
+           # This uses the *initial* value of gemini_key state
            ai_builder_tab.load(
                configure_gemini,
                inputs=[gemini_key, gemini_model],
                outputs=[gemini_status]
            )

+           # New checkbox for optional grounding
+           use_grounding_checkbox = gr.Checkbox(
+               label="Enable Grounding with Google Search",
+               value=False,  # Default to off
+               interactive=True,
+               info="Use Google Search results to inform Gemini's response (may improve factuality)."
+           )
+           # Link checkbox change to update the state variable
+           use_grounding_checkbox.change(
+               lambda v: v, inputs=use_grounding_checkbox, outputs=use_grounding_state
+           )
+
+
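            # Illustrative note (not part of this commit): instead of mirroring the checkbox
            # into use_grounding_state via .change(lambda v: v, ...), Gradio also allows
            # passing the Checkbox component directly in an event handler's inputs, which
            # supplies its current boolean value. A self-contained example of that pattern,
            # with hypothetical names:
            #
            #     with gr.Blocks() as sketch_demo:
            #         flag = gr.Checkbox(label="Enable grounding")
            #         out = gr.Markdown()
            #         gr.Button("Run").click(lambda enabled: f"Grounding: {enabled}", inputs=flag, outputs=out)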
            gr.Markdown("## Space SDK")
            # Radio buttons to select the Space SDK (Gradio or Streamlit)
            sdk_selector = gr.Radio(choices=["gradio","streamlit"], value="gradio", label="Template SDK", interactive=True)

            status_str = "✅ Ready to send commands."
        else:
            status_str = " ".join(status_parts)
+           # Fallback, should not be needed if not is_ready, but good practice
+           if not status_str:
                status_str = "Checking prerequisites..."

        # gr.update is used to dynamically change a component's properties
        return gr.update(interactive=is_ready), status_str

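# Illustrative sketch (not part of this commit): the helper above returns
# (gr.update(interactive=...), status_string), which is typically wired to the .change
# events of the inputs it depends on, so the send button only becomes clickable once the
# prerequisites are met. All names below are hypothetical.
import gradio as gr

def check_ready(api_key, logged_in):
    is_ready = bool(api_key) and bool(logged_in)
    status = "✅ Ready to send commands." if is_ready else "Waiting for API key and login..."
    return gr.update(interactive=is_ready), status

with gr.Blocks() as wiring_sketch:
    key_box = gr.Textbox(label="API Key")
    logged_in_box = gr.Checkbox(label="Logged in")
    status_md = gr.Markdown()
    send_btn = gr.Button("Send", interactive=False)
    for comp in (key_box, logged_in_box):
        comp.change(check_ready, inputs=[key_box, logged_in_box], outputs=[send_btn, status_md])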
            gemini_key, gemini_model,  # Gemini State variables
            repo_id, workflow, sdk_state,  # Workflow State variables
            iframe, run_txt, build_txt,  # UI outputs whose current values are needed by the generator
+           debug_attempts, app_description, repo_name_state, generated_code_state,  # Other State variables
+           use_grounding_state  # Add the new grounding state input
        ],
        # Outputs are updated by the values yielded from the generator
        outputs=[
            chatbot,  # Update Chatbot with new messages
            repo_id, workflow,  # Update workflow State variables
            iframe, run_txt, build_txt,  # Update UI outputs
+           debug_attempts, app_description, repo_name_state, generated_code_state,  # Update other State variables
+           use_grounding_state  # Add the new grounding state output (generators must yield/return all state they modify/pass through)
        ]
    ).success(  # Chain a .success() event to run *after* the .click() handler completes without error
        # Clear the user input textbox after the message is sent and processed

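# Illustrative sketch (not part of this commit): the note about generators above holds
# because Gradio maps each yielded tuple positionally onto the outputs list, so every
# value the handler passes through (including use_grounding_state) must appear in both
# the yield tuples and outputs. Minimal standalone example with hypothetical names:
import gradio as gr

def stepper(count):
    for i in range(3):
        # Each yield supplies one value per output, in order: (markdown text, new state).
        yield f"step {i + 1}", count + i + 1

with gr.Blocks() as generator_sketch:
    counter = gr.State(0)
    log_md = gr.Markdown()
    gr.Button("Run").click(stepper, inputs=counter, outputs=[log_md, counter])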
os.makedirs(os.environ["GRADIO_TEMP_DIR"], exist_ok=True)  # Ensure the directory exists

# Launch the Gradio UI
+ai_builder_tab.launch()
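# Usage note (sketch, not from this commit): on Hugging Face Spaces the default launch()
# arguments are normally sufficient; when running the same script elsewhere you might pass
# explicit host/port settings, e.g. ai_builder_tab.launch(server_name="0.0.0.0", server_port=7860).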