import os import re import time import json import io import requests import gradio as gr import google.generativeai as genai from google.generativeai import types # Import types for configuration and tools from huggingface_hub import create_repo, list_models, upload_file, constants from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status # Add debugging print to check environment variable immediately print(f"Attempting to read GOOGLE_API_KEY from environment: {os.environ.get('GOOGLE_API_KEY')}") # --- Helper functions for Hugging Face integration --- def show_profile(profile: gr.OAuthProfile | None) -> str: """Displays the logged-in Hugging Face profile username.""" if profile is None: return "*Not logged in.*" return f"✅ Logged in as **{profile.username}**" def list_private_models( profile: gr.OAuthProfile | None, oauth_token: gr.OAuthToken | None ) -> str: """Lists private models for the logged-in user (not used in the main workflow, but kept).""" if profile is None or oauth_token is None: return "Please log in to see your models." try: models = [ f"{m.id} ({'private' if m.private else 'public'})" for m in list_models(author=profile.username, token=oauth_token.token) ] return "No models found." 
if not models else "Models:\n\n" + "\n - ".join(models) except Exception as e: # Catching generic exception is acceptable for helper functions return f"Error listing models: {e}" def create_space_action(repo_name: str, sdk: str, profile: gr.OAuthProfile, token: gr.OAuthToken): """Creates a new Hugging Face Space repository.""" if not profile or not token: raise ValueError("Hugging Face profile or token is missing.") repo_id = f"{profile.username}/{repo_name}" try: create_repo( repo_id=repo_id, token=token.token, exist_ok=True, # Allow creating if it already exists repo_type="space", space_sdk=sdk ) url = f"https://huggingface.co/spaces/{repo_id}" iframe = f'' return repo_id, iframe except Exception as e: raise RuntimeError(f"Failed to create Space `{repo_id}`: {e}") def upload_file_to_space_action( file_obj: io.StringIO, # Specify type hint for clarity path_in_repo: str, repo_id: str, profile: gr.OAuthProfile, token: gr.OAuthToken ) -> None: """Uploads a file to a Hugging Face Space repository.""" if not (profile and token and repo_id): raise ValueError("Hugging Face profile, token, or repo_id is missing.") try: upload_file( path_or_fileobj=file_obj, path_in_repo=path_in_repo, repo_id=repo_id, token=token.token, repo_type="space" ) except Exception as e: raise RuntimeError(f"Failed to upload `{path_in_repo}` to `{repo_id}`: {e}") def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str: """Fetches build or run logs for a Space.""" if not repo_id or not token: return f"Cannot fetch {level} logs: repo_id or token missing." 
jwt_url = f"{constants.ENDPOINT}/api/spaces/{repo_id}/jwt" try: r = get_session().get(jwt_url, headers=build_hf_headers(token=token)) hf_raise_for_status(r) # Raise HTTPError for bad responses (4xx or 5xx) jwt = r.json()["token"] logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}" lines, count = [], 0 # Using stream=True is good for potentially large logs with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True, timeout=30) as resp: hf_raise_for_status(resp) for raw in resp.iter_lines(): if count >= 200: # Limit output lines to prevent UI overload lines.append("... truncated ...") break if not raw.startswith(b"data: "): # EventStream protocol expected from HF logs API continue payload = raw[len(b"data: "):] try: event = json.loads(payload.decode()) ts = event.get("timestamp", "") txt = event.get("data", "").strip() if txt: lines.append(f"[{ts}] {txt}") count += 1 except json.JSONDecodeError: # Skip lines that aren't valid JSON events continue return "\n".join(lines) if lines else f"No {level} logs found." except Exception as e: # Catching generic exception is acceptable for helper functions return f"Error fetching {level} logs for `{repo_id}`: {e}" def get_build_logs_action(repo_id, profile, token): """Action to fetch build logs with a small delay.""" if not (repo_id and profile and token): return "⚠️ Cannot fetch build logs: log in and create a Space first." # Small delay to allow build process to potentially start on HF side time.sleep(5) return _fetch_space_logs_level(repo_id, "build", token.token) def get_container_logs_action(repo_id, profile, token): """Action to fetch container logs with a delay.""" if not (repo_id and profile and token): return "⚠️ Cannot fetch container logs: log in and create a Space first." 
# Longer delay to allow container to start after build completes time.sleep(10) return _fetch_space_logs_level(repo_id, "run", token.token) # --- Google Gemini integration with model selection and grounding --- def configure_gemini(api_key: str | None, model_name: str | None) -> str: """Configures the Gemini API and checks if the model is accessible.""" if not api_key: return "⚠️ Gemini API key is not set." if not model_name: return "⚠️ Please select a Gemini model." try: genai.configure(api_key=api_key) # Attempt a simple call to verify credentials and model availability # This will raise an exception if the key is invalid or model not found genai.GenerativeModel(model_name).generate_content("ping", stream=False) # This message indicates the API call *for configuration check* was successful return f"✅ Gemini configured successfully with **{model_name}**." except Exception as e: # This message indicates the API call *for configuration check* failed return f"❌ Error configuring Gemini: {e}" def call_gemini(prompt: str, api_key: str, model_name: str, use_grounding: bool = False) -> str: """Calls the Gemini API with a given prompt, optionally using grounding.""" # This check is crucial - it will raise an error *before* the API call if prereqs aren't met if not api_key or not model_name: raise ValueError("Gemini API key or model not set.") try: genai.configure(api_key=api_key) model = genai.GenerativeModel(model_name) # Define tools for grounding if requested. # Using genai.types.GoogleSearch() is recommended for Gemini 2.0+ # and is backwards compatible with 1.5 for retrieval. 
tools_config = [types.Tool(google_search=types.GoogleSearch())] if use_grounding else None # Using generate_content and stream=False for simplicity here response = model.generate_content( prompt, stream=False, tools=tools_config # Pass the tools configuration ) # Check if response is blocked if response.prompt_feedback and response.prompt_feedback.block_reason: raise RuntimeError(f"Gemini API call blocked: {response.prompt_feedback.block_reason}") if not response.candidates: # Check for safety ratings if no candidates are returned but not blocked if response.prompt_feedback and response.prompt_feedback.safety_ratings: ratings = "; ".join([f"{r.category}: {r.probability}" for r in response.prompt_feedback.safety_ratings]) raise RuntimeError(f"Gemini API call returned no candidates. Safety ratings: {ratings}") else: raise RuntimeError("Gemini API call returned no candidates.") # If response.candidates is not empty, get the text # Using response.text is a convenient way to get text from the first candidate part return response.text or "" # Return empty string if no text except Exception as e: # Re-raising as RuntimeError for the workflow to catch and manage raise RuntimeError(f"Gemini API call failed: {e}") # --- AI workflow logic (State Machine) --- # Define States for the workflow STATE_IDLE = "idle" STATE_AWAITING_REPO_NAME = "awaiting_repo_name" STATE_CREATING_SPACE = "creating_space" STATE_GENERATING_CODE = "generating_code" STATE_UPLOADING_APP_PY = "uploading_app_py" STATE_GENERATING_REQUIREMENTS = "generating_requirements" STATE_UPLOADING_REQUIREMENTS = "uploading_requirements" STATE_GENERATING_README = "generating_readme" STATE_UPLOADING_README = "uploading_readme" STATE_CHECKING_LOGS_BUILD = "checking_logs_build" STATE_CHECKING_LOGS_RUN = "checking_logs_run" STATE_DEBUGGING_CODE = "debugging_code" STATE_UPLOADING_FIXED_APP_PY = "uploading_fixed_app_py" STATE_COMPLETE = "complete" MAX_DEBUG_ATTEMPTS = 3 # Limit the number of automatic debug attempts def 
add_bot_message(history: list[dict], bot_message: str) -> list[dict]: """Helper to add a new assistant message to the chatbot history.""" history.append({"role": "assistant", "content": bot_message}) return history # Add an initial welcome message to the chatbot (defined outside Blocks to be called by load chain) def greet(): return [{"role": "assistant", "content": "Welcome! Please log in to Hugging Face and provide your Google AI Studio API key to start building Spaces. Once ready, type 'generate me a gradio app called myapp' or 'create' to begin."}] # Helper function to update send button interactivity based on prereqs # MODIFIED to return gr.update # FIX: Added *args, **kwargs to accept any extra arguments passed by Gradio chaining def check_send_button_ready(profile: gr.OAuthProfile | None, token: gr.OAuthToken | None, api_key: str | None, model_name: str | None, *args, **kwargs) -> gr.update: """Checks if HF login and Gemini configuration are complete and returns update for button interactivity.""" # --- START ENHANCED DEBUGGING LOGS --- print("\n--- check_send_button_ready START ---") print(f" Received profile type: {type(profile)}, is None: {profile is None}") print(f" Received token type: {type(token)}, is None: {token is None}") # For api_key, print part of the key if not None for verification, be careful with full key print(f" Received api_key is None: {api_key is None}, first 5 chars: {api_key[:5] if api_key else 'N/A'}") print(f" Received model_name: {model_name}") # --- END ENHANCED DEBUGGING LOGS --- is_logged_in = profile is not None and token is not None is_gemini_ready = api_key is not None and model_name is not None # --- CONTINUED DEBUGGING LOGS --- print(f" HF check: {profile is not None} and {token is not None} = {is_logged_in}") print(f" Gemini check: {api_key is not None} and {model_name is not None} = {is_gemini_ready}") # --- END CONTINUED DEBUGGING LOGS --- is_ready = is_logged_in and is_gemini_ready print(f"check_send_button_ready - HF 
Ready: {is_logged_in}, Gemini Ready: {is_gemini_ready}, Button Ready (boolean): {is_ready}") print("--- check_send_button_ready END ---\n") return gr.update(interactive=is_ready) # This is the main generator function for the workflow, triggered by the 'Send' button # NOTE: This function MUST accept ALL state variables as inputs that it might need to modify or pass through. # It MUST also yield/return ALL state variables in the same order they appear in the `outputs` list of the `.click()` event. def ai_workflow_chat( message: str, history: list[dict], hf_profile: gr.OAuthProfile | None, hf_token: gr.OAuthToken | None, gemini_api_key: str | None, gemini_model: str | None, repo_id_state: str | None, workflow_state: str, space_sdk: str, # NOTE: UI component values are passed *by value* to the generator preview_html: str, # Value from iframe HTML container_logs: str, # Value from run_txt Textbox build_logs: str, # Value from build_txt Textbox debug_attempts_state: int, app_description_state: str | None, repo_name_state: str | None, generated_code_state: str | None, use_grounding_state: bool, # Value from use_grounding_checkbox # Absorb potential extra args passed by Gradio event listeners (e.g. 
old value, event data) *args, **kwargs ) -> tuple[ list[dict], # 0: Updated chat history (for chatbot) str | None, # 1: Updated repo_id (for repo_id state) str, # 2: Updated workflow state (for workflow state) str, # 3: Updated iframe HTML (for iframe UI component) str, # 4: Updated container logs (for run_txt UI component) str, # 5: Updated build logs (for build_txt UI component) int, # 6: Updated debug attempts count (for debug_attempts state) str | None, # 7: Updated app description (for app_description state) str | None, # 8: Updated repo name (for repo_name_state state) str | None, # 9: Updated generated code (for generated_code_state state) bool, # 10: Updated use_grounding_state (for use_grounding_state state) ]: """ Generator function to handle the AI workflow state machine. Each 'yield' pauses execution and sends values to update Gradio outputs/state. """ # Unpack state variables from Gradio State components passed as inputs repo_id = repo_id_state state = workflow_state attempts = debug_attempts_state app_desc = app_description_state repo_name = repo_name_state generated_code = generated_code_state use_grounding = use_grounding_state # Unpack grounding state # Keep copies of potentially updated UI elements passed as inputs to update them later # These are the *current values* of the UI components as of the button click updated_preview = preview_html updated_build = build_logs updated_run = container_logs # Add the user's message to the chat history immediately user_message_entry = {"role": "user", "content": message} # Add username if logged in (optional, but nice) if hf_profile and hf_profile.username: user_message_entry["name"] = hf_profile.username history.append(user_message_entry) # Yield immediately to update the chat UI with the user's message # This provides immediate feedback to the user while the AI processes # Ensure all state variables and UI outputs are yielded back in the correct order yield (history, repo_id, state, updated_preview, 
updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) try: # --- State Machine Logic based on the current 'state' variable --- if state == STATE_IDLE: # Check workflow prerequisites before starting any workflow actions # The Send button should already be disabled if these aren't met, but double-check # Note: These checks here are for the *workflow logic*, not the button interactivity logic. # The button state is controlled by check_send_button_ready and the .then chains. if not (hf_profile and hf_token): # This case should ideally not be reachable if the button is correctly disabled history = add_bot_message(history, "Workflow paused: Please log in to Hugging Face first.") yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) return # Stop workflow execution for this click if not (gemini_api_key and gemini_model): # This case should also ideally not be reachable if the button is correctly disabled history = add_bot_message(history, "Workflow cannot start: Please ensure your Gemini API key is entered and a model is selected.") yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) return # Stop workflow execution for this click # Look for specific commands in the user's message reset_match = "reset" in message.lower() # Capture app description AND repo name using regex generate_match = re.search(r'generate (?:me )?(?:a|an) (.+) app called (\w+)', message, re.I) # Capture repo name for a simple 'create space' command create_match = re.search(r'create (?:a|an)? 
space called (\w+)', message, re.I) if reset_match: # Reset the workflow state and associated variables history = add_bot_message(history, "Workflow reset.") # Yield updated history and reset state variables to their initial values # Also reset UI outputs to their initial state yield (history, None, STATE_IDLE, "

No Space created yet.

", "", "", 0, None, None, None, False) # Reset use_grounding to default False, and other states to None/default # No return needed after yield in this generator pattern; execution for this click ends here. elif generate_match: # User requested generation with description and name new_app_desc = generate_match.group(1).strip() # Capture description part new_repo_name = generate_match.group(2).strip() # Capture name part history = add_bot_message(history, f"Acknowledged: '{message}'. Starting workflow to create Space `{hf_profile.username}/{new_repo_name}` for a '{new_app_desc}' app.") # Update state variables for the next step (creation) state = STATE_CREATING_SPACE repo_name = new_repo_name app_desc = new_app_desc # Yield updated history and state variables (pass UI outputs through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed elif create_match: # User requested simple space creation with a name new_repo_name = create_match.group(1).strip() history = add_bot_message(history, f"Acknowledged: '{message}'. Starting workflow to create Space `{hf_profile.username}/{new_repo_name}`.") state = STATE_CREATING_SPACE # Transition state to creation repo_name = new_repo_name # Store the validated repo name # Yield updated history and state variables (pass UI outputs through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed elif "create" in message.lower() and not repo_id: # User wants to create but didn't specify a name yet history = add_bot_message(history, "Okay, what should the Space be called? 
(e.g., `my-awesome-app`)") state = STATE_AWAITING_REPO_NAME # Transition to the state where we wait for the name # Yield updated history and state (pass UI outputs through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed else: # Command not recognized in IDLE state history = add_bot_message(history, "Command not recognized. Try 'generate me a gradio app called myapp', or 'reset'.") # Yield updated history and current state (pass UI outputs through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed elif state == STATE_AWAITING_REPO_NAME: # User's message is expected to be the repo name new_repo_name = message.strip() # Basic validation for Hugging Face repo name format # Allow letters, numbers, hyphens, underscores, max 100 chars (HF limit check) if not new_repo_name or re.search(r'[^a-zA-Z0-9_-]', new_repo_name) or len(new_repo_name) > 100: history = add_bot_message(history, "Invalid name. Please provide a single word/slug for the Space name (letters, numbers, underscores, hyphens only, max 100 chars).") # Stay in AWAITING_REPO_NAME state and yield message (pass UI outputs through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed else: history = add_bot_message(history, f"Using Space name `{new_repo_name}`. Creating Space `{hf_profile.username}/{new_repo_name}`...") state = STATE_CREATING_SPACE # Transition state to creation repo_name = new_repo_name # Store the validated repo name # Yield updated history, state, and repo name. UI outputs remain unchanged for now. # The next click will proceed from the STATE_CREATING_SPACE block. 
yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed # Note: Each 'elif' block below represents a distinct step in the workflow triggered # when the 'state' variable matches its condition on a button click. elif state == STATE_CREATING_SPACE: # Ensure repo_name is available (it should have been set in the previous step) if not repo_name: history = add_bot_message(history, "Internal error: Repo name missing for creation. Resetting.") # Reset relevant states and UI outputs on critical error yield (history, None, STATE_IDLE, "

Error creating space.

", "", "", 0, None, None, None, use_grounding) # Pass grounding state through # No return needed else: try: # Perform the action to create the Space on Hugging Face new_repo_id, iframe_html = create_space_action(repo_name, space_sdk, hf_profile, hf_token) updated_preview = iframe_html # Update the iframe content to show the new space repo_id = new_repo_id # Store the official repo_id history = add_bot_message(history, f"✅ Space `{repo_id}` created. Click 'Send' to generate and upload code.") state = STATE_GENERATING_CODE # Transition to the next state # Yield updated state variables and history, and the new iframe HTML yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # Pass logs and grounding through # No return needed except Exception as e: history = add_bot_message(history, f"❌ Error creating space: {e}. Click 'reset'.") # Yield error message and reset state on failure yield (history, None, STATE_IDLE, "

Error creating space.

", "", "", 0, None, None, None, use_grounding) # Pass logs and grounding through # No return needed elif state == STATE_GENERATING_CODE: # Define the prompt for Gemini based on the app description or a default prompt_desc = app_desc if app_desc else f'a simple {space_sdk} app' prompt = f""" You are an AI assistant specializing in Hugging Face Spaces using the {space_sdk} SDK. Generate a full, single-file Python app based on: '{prompt_desc}' Ensure the code is runnable as `app.py` in a Hugging Face Space using the `{space_sdk}` SDK. Include necessary imports and setup. Return **only** the python code block for `app.py`. Do not include any extra text, explanations, or markdown outside the code block. """ try: history = add_bot_message(history, f"🧠 Generating `{prompt_desc}` `{space_sdk}` app (`app.py`) code with Gemini...") if use_grounding: history = add_bot_message(history, "(Using Grounding with Google Search)") # Yield to show message before the potentially time-consuming API call yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # Perform the Gemini API call to generate code, optionally using grounding code = call_gemini(prompt, gemini_api_key, gemini_model, use_grounding=use_grounding) code = code.strip() # Clean up common markdown code block formatting if present if code.startswith("```python"): code = code[len("```python"):].strip() if code.startswith("```"): # Handle generic code blocks too code = code[len("```"):].strip() if code.endswith("```"): code = code[:-len("```")].strip() if not code: raise ValueError("Gemini returned empty code.") history = add_bot_message(history, "✅ `app.py` code generated. 
Click 'Send' to upload.") state = STATE_UPLOADING_APP_PY # Transition to the upload state generated_code = code # Store the generated code in the state variable for the next step # Yield updated state variables and history (pass UI outputs and other states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed except Exception as e: history = add_bot_message(history, f"❌ Error generating code: {e}. Click 'reset'.") # Yield error message and reset state on failure yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed elif state == STATE_UPLOADING_APP_PY: # Retrieve the generated code from the state variable code_to_upload = generated_code if not code_to_upload: history = add_bot_message(history, "Internal error: No code to upload. Resetting.") yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed else: history = add_bot_message(history, "☁️ Uploading `app.py`...") # Yield to show message before the upload action (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) try: # Perform the file upload action upload_file_to_space_action(io.StringIO(code_to_upload), "app.py", repo_id, hf_profile, hf_token) history = add_bot_message(history, "✅ Uploaded `app.py`. 
Click 'Send' to generate requirements.") state = STATE_GENERATING_REQUIREMENTS # Transition state generated_code = None # Clear the stored code after use to free memory/state space # Yield updated state variables and history (pass UI outputs and other states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed except Exception as e: history = add_bot_message(history, f"❌ Error uploading `app.py`: {e}. Click 'reset'.") # Yield error message and reset state on failure yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed elif state == STATE_GENERATING_REQUIREMENTS: history = add_bot_message(history, "📄 Generating `requirements.txt`...") # Yield to show message before generating requirements (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # Logic to determine required packages based on SDK and keywords in the app description reqs_list = ["gradio"] if space_sdk == "gradio" else ["streamlit"] # Add essential libraries regardless of description keywords or grounding essential_libs = ["google-generativeai", "huggingface_hub"] # Only add if Gemini is actually needed for the app (determined by description or if key is present) # If we are here, key and model are available based on STATE_IDLE checks reqs_list.extend(essential_libs) # Add common libraries if description suggests they might be needed if app_desc: app_desc_lower = app_desc.lower() if "requests" in app_desc_lower or "api" in app_desc_lower: reqs_list.append("requests") # Image processing libraries if "image" in app_desc_lower or "upload" in app_desc_lower or "blur" in app_desc_lower or "vision" in app_desc_lower or "photo" in app_desc_lower: reqs_list.append("Pillow") if "numpy" in 
app_desc_lower: reqs_list.append("numpy") if "pandas" in app_desc_lower or "dataframe" in app_desc_lower: reqs_list.append("pandas") # Add scikit-image and opencv if image processing is heavily implied if any(lib in app_desc_lower for lib in ["scikit-image", "skimage", "cv2", "opencv"]): reqs_list.extend(["scikit-image", "opencv-python"]) # Note: opencv-python for pip # Add transformers if large models are implied if any(lib in app_desc_lower for lib in ["transformer", "llama", "mistral", "bert", "gpt2"]): reqs_list.append("transformers") # Add torch or tensorflow if deep learning frameworks are implied if any(lib in app_desc_lower for lib in ["torch", "pytorch", "tensorflow", "keras"]): reqs_list.extend(["torch", "tensorflow"]) # Users might need specific versions, but this is a start # Use dict.fromkeys to get unique items while preserving insertion order (Python 3.7+) reqs_list = list(dict.fromkeys(reqs_list)) # Sort alphabetically for cleaner requirements.txt reqs_list.sort() reqs_content = "\n".join(reqs_list) + "\n" history = add_bot_message(history, "✅ `requirements.txt` generated. Click 'Send' to upload.") state = STATE_UPLOADING_REQUIREMENTS # Transition state generated_code = reqs_content # Store requirements content # Yield updated state variables and history (pass UI outputs and other states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed elif state == STATE_UPLOADING_REQUIREMENTS: # Retrieve requirements content from state variable reqs_content_to_upload = generated_code if not reqs_content_to_upload: history = add_bot_message(history, "Internal error: No requirements content to upload. 
Resetting.") yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed else: history = add_bot_message(history, "☁️ Uploading `requirements.txt`...") # Yield message before upload (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) try: # Perform requirements file upload upload_file_to_space_action(io.StringIO(reqs_content_to_upload), "requirements.txt", repo_id, hf_profile, hf_token) history = add_bot_message(history, "✅ Uploaded `requirements.txt`. Click 'Send' to generate README.") state = STATE_GENERATING_README # Transition state generated_code = None # Clear content after use # Yield updated state variables and history (pass UI outputs and other states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed except Exception as e: history = add_bot_message(history, f"❌ Error uploading `requirements.txt`: {e}. Click 'reset'.") # Yield error message and reset state on failure yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed elif state == STATE_GENERATING_README: history = add_bot_message(history, "📝 Generating `README.md`...") # Yield message before generating README (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # Generate simple README content with Space metadata header readme_title = repo_name if repo_name else "My Awesome Space" readme_description = app_desc if app_desc else f"This Hugging Face Space hosts an AI-generated {space_sdk} application." 
readme_content = f"""--- title: {readme_title} emoji: 🚀 colorFrom: blue colorTo: yellow sdk: {space_sdk} app_file: app.py pinned: false --- # {readme_title} {readme_description} This Space was automatically generated by an AI workflow using Google Gemini and Gradio. """ # Added Space metadata header and slightly improved content history = add_bot_message(history, "✅ `README.md` generated. Click 'Send' to upload.") state = STATE_UPLOADING_README # Transition state generated_code = readme_content # Store README content # Yield updated state variables and history (pass UI outputs and other states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed elif state == STATE_UPLOADING_README: # Retrieve README content from state variable readme_content_to_upload = generated_code if not readme_content_to_upload: history = add_bot_message(history, "Internal error: No README content to upload. Resetting.") yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed else: history = add_bot_message(history, "☁️ Uploading `README.md`...") # Yield message before upload (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) try: # Perform README file upload upload_file_to_space_action(io.StringIO(readme_content_to_upload), "README.md", repo_id, hf_profile, hf_token) history = add_bot_message(history, "✅ Uploaded `README.md`. All files uploaded. Space is now building. 
Click 'Send' to check build logs.") state = STATE_CHECKING_LOGS_BUILD # Transition to checking build logs generated_code = None # Clear content after use # Yield updated state variables and history (pass UI outputs and other states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed except Exception as e: history = add_bot_message(history, f"❌ Error uploading `README.md`: {e}. Click 'reset'.") # Yield error message and reset state on failure yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed elif state == STATE_CHECKING_LOGS_BUILD: history = add_bot_message(history, "🔍 Fetching build logs...") # Yield message before fetching logs (which includes a delay) (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # Fetch build logs from HF Space build_logs_text = get_build_logs_action(repo_id, hf_profile, hf_token) updated_build = build_logs_text # Update the logs display variable # Simple check for common error indicators in logs (case-insensitive) if "error" in updated_build.lower() or "exception" in updated_build.lower() or "build failed" in updated_build.lower(): history = add_bot_message(history, "⚠️ Build logs indicate potential issues. Please inspect above. Click 'Send' to check container logs (app might still start despite build warnings).") state = STATE_CHECKING_LOGS_RUN # Transition even on build error, to see if container starts # Yield updated state, logs, and variables yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed else: history = add_bot_message(history, "✅ Build logs fetched. 
Click 'Send' to check container logs.") state = STATE_CHECKING_LOGS_RUN # Transition to next log check # Yield updated state, logs, and variables yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed elif state == STATE_CHECKING_LOGS_RUN: history = add_bot_message(history, "🔍 Fetching container logs...") # Yield message before fetching logs (includes a delay) (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # Fetch container logs from HF Space container_logs_text = get_container_logs_action(repo_id, hf_profile, hf_token) updated_run = container_logs_text # Update the logs display variable # Check for errors in run logs and if we have debug attempts left if ("error" in updated_run.lower() or "exception" in updated_run.lower()) and attempts < MAX_DEBUG_ATTEMPTS: attempts += 1 # Increment debug attempts counter history = add_bot_message(history, f"❌ Errors detected in container logs. Attempting debug fix #{attempts}/{MAX_DEBUG_ATTEMPTS}. Click 'Send' to proceed.") state = STATE_DEBUGGING_CODE # Transition to the debugging state # Yield updated state, logs, attempts, and variables yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed elif ("error" in updated_run.lower() or "exception" in updated_run.lower()) and attempts >= MAX_DEBUG_ATTEMPTS: # Max debug attempts reached history = add_bot_message(history, f"❌ Errors detected in container logs. Max debug attempts ({MAX_DEBUG_ATTEMPTS}) reached. 
Please inspect logs manually or click 'reset'.") state = STATE_COMPLETE # Workflow ends on failure after attempts # Yield updated state, logs, attempts, and variables yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed else: # No significant errors found in logs, assume success history = add_bot_message(history, "✅ App appears to be running successfully! Check the iframe above. Click 'reset' to start a new project.") state = STATE_COMPLETE # Workflow ends on success # Yield updated state, logs, attempts, and variables yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed elif state == STATE_DEBUGGING_CODE: history = add_bot_message(history, f"🧠 Calling Gemini to generate fix based on logs...") if use_grounding: history = add_bot_message(history, "(Using Grounding with Google Search)") # Yield message before Gemini API call (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # Construct prompt for Gemini including the container logs debug_prompt = f""" You are debugging a {space_sdk} Space. The goal is to fix the code in `app.py` based on the container logs provided. Here are the container logs: Use code with caution. Python {updated_run} Generate the *complete, fixed* content for `app.py` based on these logs. Return **only** the python code block for app.py. Do not include any extra text, explanations, or markdown outside the code block. """ try: # Call Gemini to generate the corrected code, optionally using grounding # Note: Grounding might be less effective for debugging based *only* on logs, # but we include the option as requested. 
fix_code = call_gemini(debug_prompt, gemini_api_key, gemini_model, use_grounding=use_grounding) fix_code = fix_code.strip() # Clean up potential markdown formatting if fix_code.startswith("```python"): fix_code = fix_code[len("```python"):].strip() if fix_code.startswith("```"): fix_code = fix_code[len("```"):].strip() if fix_code.endswith("```"): fix_code = fix_code[:-len("```")].strip() if not fix_code: raise ValueError("Gemini returned empty fix code.") history = add_bot_message(history, "✅ Fix code generated. Click 'Send' to upload.") state = STATE_UPLOADING_FIXED_APP_PY # Transition to the upload state for the fix generated_code = fix_code # Store the generated fix code # Yield updated state, code, and variables (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed except Exception as e: history = add_bot_message(history, f"❌ Error generating debug code: {e}. Click 'reset'.") # Yield error message and reset state on failure yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed elif state == STATE_UPLOADING_FIXED_APP_PY: # Retrieve the fixed code from the state variable fixed_code_to_upload = generated_code if not fixed_code_to_upload: history = add_bot_message(history, "Internal error: No fixed code available to upload. 
Resetting.") yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed else: history = add_bot_message(history, "☁️ Uploading fixed `app.py`...") # Yield message before upload (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) try: # Perform the upload of the fixed app.py upload_file_to_space_action(io.StringIO(fixed_code_to_upload), "app.py", repo_id, hf_profile, hf_token) history = add_bot_message(history, "✅ Fixed `app.py` uploaded. Space will rebuild. Click 'Send' to check logs again.") state = STATE_CHECKING_LOGS_RUN # Go back to checking run logs after uploading the fix generated_code = None # Clear code after use # Yield updated state, code, and variables (pass UI outputs and states through) yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed except Exception as e: history = add_bot_message(history, f"❌ Error uploading fixed `app.py`: {e}. Click 'reset'.") # Yield error message and reset state on failure yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # No return needed elif state == STATE_COMPLETE: # If in the complete state, the workflow is finished for this project. # Subsequent clicks just add user messages; we simply yield the current state. yield (history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code, use_grounding) # No return needed except Exception as e: # This catches any unexpected errors that occur within any state's logic error_message = f"Workflow step failed unexpectedly ({state}): {e}. Click 'Send' to re-attempt this step or 'reset'." 
history = add_bot_message(history, error_message) print(f"Critical Error in state {state}: {e}") # Log the error for debugging purposes # Yield an error state and reset essential workflow variables on critical failure yield (history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None, use_grounding) # Include use_grounding # No return needed after yield # --- Build the Gradio UI --- with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab: # Gradio State variables - these persist their values across user interactions (clicks) # Define these first as they might be used in default values for components hf_profile = gr.State(None) hf_token = gr.State(None) # FIX: Initialize gemini_key state from env var on load gemini_key = gr.State(os.environ.get("GOOGLE_API_KEY")) gemini_model = gr.State("gemini-1.5-flash") # Default selected model repo_id = gr.State(None) # Stores the ID of the created Space workflow = gr.State(STATE_IDLE) # Stores the current state of the AI workflow sdk_state = gr.State("gradio") # Stores the selected Space SDK (Gradio or Streamlit) debug_attempts = gr.State(0) # Counter for how many debugging attempts have been made app_description = gr.State(None) # Stores the user's initial description of the desired app repo_name_state = gr.State(None) # Stores the chosen repository name for the Space generated_code_state = gr.State(None) # Temporary storage for generated file content (app.py, reqs, README) # New State variable for grounding checkbox use_grounding_state = gr.State(False) with gr.Row(): # Sidebar column for inputs and status displays with gr.Column(scale=1, min_width=300): gr.Markdown("## Hugging Face Login") # Define login_status before it's used in login_btn.click outputs login_status = gr.Markdown("*Not logged in.*") # Hugging Face Login Button login_btn = gr.LoginButton(variant="huggingface") gr.Markdown("## Google AI Studio / Gemini") # Define gemini_input and gemini_status before they are 
used in change handlers gemini_input = gr.Textbox( label="API Key", type="password", # Hides input for security interactive=True, value=os.environ.get("GOOGLE_API_KEY"), # Pre-fill if GOOGLE_API_KEY env var is set info="Get your key from Google AI Studio" ) gemini_status = gr.Markdown("") # Display Gemini configuration status # Define model_selector before it's used in its change handler model_selector = gr.Radio( choices=[ ("Gemini 1.5 Flash", "gemini-1.5-flash"), ("Gemini 1.5 Pro", "gemini-1.5-pro"), ("Gemini 1.0 Pro", "gemini-1.0-pro"), ], value="gemini-1.5-flash", # Default selection label="Select model", interactive=True ) # Define grounding checkbox before its change handler use_grounding_checkbox = gr.Checkbox( label="Enable Grounding with Google Search", value=False, # Default to off interactive=True, info="Use Google Search results to inform Gemini's response (may improve factuality)." ) gr.Markdown("## Space SDK") # Define sdk_selector before its change handler sdk_selector = gr.Radio(choices=["gradio","streamlit"], value="gradio", label="Template SDK", interactive=True) gr.Markdown("## Workflow Status") # Define status_text and repo_id_text before they are used in change handlers status_text = gr.Textbox(label="Current State", value=STATE_IDLE, interactive=False) repo_id_text = gr.Textbox(label="Current Space ID", value="None", interactive=False) # Main content area column with gr.Column(scale=3): # Define chatbot, user_input, send_btn before send_btn.click chatbot = gr.Chatbot(type='messages', label="AI Workflow Chat") user_input = gr.Textbox(placeholder="Type your message…", interactive=True) # Define send_btn before its click handler # Initial interactive state will be handled by the load event chain send_btn = gr.Button("Send") # Starts disabled by default (interactive=False) # Define iframe, build_txt, run_txt before they are used in send_btn.click inputs/outputs # These are UI components, NOT State variables iframe = gr.HTML("

No Space created yet.

") # HTML element for the Space iframe build_txt = gr.Textbox(label="Build Logs", lines=10, interactive=False, value="", max_lines=20) # Set max_lines for scrollability run_txt = gr.Textbox(label="Container Logs", lines=10, interactive=False, value="", max_lines=20) # Set max_lines for scrollability # --- Define Event Handlers and Chains AFTER all components are defined --- # Define the inputs used for checking prerequisites send_button_interactive_binding_inputs = [hf_profile, hf_token, gemini_key, gemini_model] # Handle login button click: Update profile/token state -> Check prereqs and update button interactivity # LoginButton outputs a tuple (OAuthProfile, OAuthToken) on success login_btn.click( lambda x: (x[0], x[1]), inputs=[login_btn], outputs=[hf_profile, hf_token] # Update HF State variables ).then( # Chain the next action after state is updated # Call the update function and bind its output to the button component check_send_button_ready, inputs=send_button_interactive_binding_inputs, outputs=[send_btn] # Update button interactivity using gr.update return value ) # Handle Gemini Key Input change: Update key state -> Configure Gemini status -> Check prereqs and update button interactivity gemini_input.change( lambda k: k, inputs=[gemini_input], outputs=[gemini_key] # Update gemini_key state ).then( configure_gemini, inputs=[gemini_key, gemini_model], outputs=[gemini_status] # Update Gemini status ).then( # Call the update function and bind its output to the button component check_send_button_ready, inputs=send_button_interactive_binding_inputs, outputs=[send_btn] # Update button interactivity using gr.update return value ) # Handle Gemini Model Selector change: Update model state -> Configure Gemini status -> Check prereqs and update button interactivity model_selector.change( lambda m: m, inputs=[model_selector], outputs=[gemini_model] # Update gemini_model state ).then( configure_gemini, inputs=[gemini_key, gemini_model], outputs=[gemini_status] # Update 
Gemini status ).then( # Call the update function and bind its output to the button component check_send_button_ready, inputs=send_button_interactive_binding_inputs, outputs=[send_btn] # Update button interactivity using gr.update return value ) # Handle Grounding checkbox change: update grounding state use_grounding_checkbox.change( lambda v: v, inputs=use_grounding_checkbox, outputs=use_grounding_state ) # Handle SDK selector change: update sdk state sdk_selector.change( lambda s: s, inputs=sdk_selector, outputs=sdk_state ) # Link Workflow State variable change to UI status display workflow.change(lambda s: s, inputs=workflow, outputs=status_text) # Link Repo ID State variable change to UI status display repo_id.change(lambda r: r if r else "None", inputs=repo_id, outputs=repo_id_text) # The main event handler for the Send button # This .click() event triggers the ai_workflow_chat generator function # Inputs are read from UI components AND State variables # Outputs are updated by the values yielded from the generator send_btn.click( ai_workflow_chat, # The generator function to run inputs=[ user_input, chatbot, # UI component inputs (message, current chat history) hf_profile, hf_token, # HF State variables gemini_key, gemini_model, # Gemini State variables repo_id, workflow, sdk_state, # Workflow State variables # UI component inputs whose *current values* are needed by the generator # These are NOT State variables with the same names iframe, run_txt, build_txt, debug_attempts, app_description, repo_name_state, generated_code_state, # Other State variables use_grounding_state # Add the new grounding state input ], outputs=[ chatbot, # Update Chatbot with new messages repo_id, workflow, # Update workflow State variables iframe, run_txt, build_txt, # Update UI component outputs debug_attempts, app_description, repo_name_state, generated_code_state, # Update other State variables use_grounding_state # Update the grounding state output (generators must yield/return 
all state they modify/pass through) ] ).success( # Chain a .success() event to run *after* the .click() handler completes without error # Clear the user input textbox after the message is sent and processed lambda: gr.update(value=""), inputs=None, outputs=user_input # Update the user input textbox ) # --- Initial Load Event Chain (Defined INSIDE gr.Blocks, AFTER components and initial bindings) --- # This chain runs once when the app loads ai_builder_tab.load( # Action 1: Show profile (loads cached login if available), does NOT need inputs show_profile, inputs=None, outputs=login_status # Update login status markdown ).then( # Action 2: Configure Gemini using the initial state values (from env var if set) # This uses the *initial* value of gemini_key and gemini_model state variables configure_gemini, inputs=[gemini_key, gemini_model], outputs=[gemini_status] # Update Gemini status display ).then( # Action 3: Check prereqs and update send button interactivity based on initial states # This uses the *initial* values of hf_profile, hf_token, gemini_key, and gemini_model states check_send_button_ready, # Use the updated helper function inputs=send_button_interactive_binding_inputs, outputs=[send_btn] # Update button interactivity using gr.update return value ).then( # Action 4: Add the initial welcome message to the chatbot greet, inputs=None, outputs=chatbot ) # The main workflow function and other helper functions are correctly defined OUTSIDE the gr.Blocks context # because they operate on the *values* passed to them by Gradio event triggers, not the UI component objects themselves. 
if __name__ == "__main__":
    # Optional hardening (left disabled): mount a retry strategy on the
    # session huggingface_hub uses internally, to survive transient 429/5xx.
    # from requests.adapters import HTTPAdapter
    # from urllib3.util.retry import Retry
    # retry_strategy = Retry(total=5, backoff_factor=1,
    #                        status_forcelist=[429, 500, 502, 503, 504])
    # adapter = HTTPAdapter(max_retries=retry_strategy)
    # session = get_session()  # session used internally by huggingface_hub
    # session.mount("http://", adapter)
    # session.mount("https://", adapter)

    # Cap upload size for generated files (app.py, requirements, README).
    os.environ["GRADIO_MAX_FILE_SIZE"] = "100MB"
    # Keep Gradio temp uploads in a known local directory, and ensure it exists.
    os.environ["GRADIO_TEMP_DIR"] = "./tmp"
    os.makedirs(os.environ["GRADIO_TEMP_DIR"], exist_ok=True)

    # Launch the UI; this call blocks until the server shuts down.
    ai_builder_tab.launch()