wuhp commited on
Commit
f106b2a
·
verified ·
1 Parent(s): 7f04c4f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +77 -43
app.py CHANGED
@@ -55,7 +55,7 @@ def create_space_action(repo_name: str, sdk: str, profile: gr.OAuthProfile, toke
55
  space_sdk=sdk
56
  )
57
  url = f"https://huggingface.co/spaces/{repo_id}"
58
- iframe = f'<iframe src="{url}" width="100%" height="500px"></iframe>'
59
  return repo_id, iframe
60
  except Exception as e:
61
  raise RuntimeError(f"Failed to create Space `{repo_id}`: {e}")
@@ -175,7 +175,13 @@ def call_gemini(prompt: str, api_key: str, model_name: str, use_grounding: bool
175
  if response.prompt_feedback and response.prompt_feedback.block_reason:
176
  raise RuntimeError(f"Gemini API call blocked: {response.prompt_feedback.block_reason}")
177
  if not response.candidates:
178
- raise RuntimeError("Gemini API call returned no candidates.")
 
 
 
 
 
 
179
 
180
  # If response.candidates is not empty, get the text
181
  # Using response.text is a convenient way to get text from the first candidate part
@@ -212,6 +218,8 @@ def add_bot_message(history: list[dict], bot_message: str) -> list[dict]:
212
  return history
213
 
214
  # This is the main generator function for the workflow, triggered by the 'Send' button
 
 
215
  def ai_workflow_chat(
216
  message: str,
217
  history: list[dict],
@@ -273,7 +281,7 @@ def ai_workflow_chat(
273
 
274
  # Yield immediately to update the chat UI with the user's message
275
  # This provides immediate feedback to the user while the AI processes
276
- # Ensure all state variables are yielded back
277
  yield (history, repo_id, state, updated_preview, updated_run, updated_build,
278
  attempts, app_desc, repo_name, generated_code, use_grounding)
279
 
@@ -282,6 +290,7 @@ def ai_workflow_chat(
282
 
283
  if state == STATE_IDLE:
284
  # Check prerequisites before starting any workflow actions
 
285
  if not (hf_profile and hf_token):
286
  history = add_bot_message(history, "Workflow paused: Please log in to Hugging Face first.")
287
  # Yield updated history and current state, then exit for this click
@@ -308,7 +317,7 @@ def ai_workflow_chat(
308
  history = add_bot_message(history, "Workflow reset.")
309
  # Yield updated history and reset state variables to their initial values
310
  yield (history, None, STATE_IDLE, "<p>No Space created yet.</p>", "", "", 0,
311
- None, None, None, False) # Reset use_grounding to default False
312
  # No return needed after yield in this generator pattern; execution for this click ends here.
313
 
314
  elif generate_match:
@@ -500,16 +509,36 @@ Return **only** the python code block for `app.py`. Do not include any extra tex
500
  # Logic to determine required packages based on SDK and keywords in the app description
501
  reqs_list = ["gradio"] if space_sdk == "gradio" else ["streamlit"]
502
  # Add essential libraries regardless of description keywords or grounding
503
- reqs_list.extend(["google-generativeai", "huggingface_hub", "Pillow", "numpy", "pandas", "requests"]) # Assume common ones might be needed
504
- # Add scikit-image and opencv if description implies image processing
 
 
 
 
505
  if app_desc:
506
  app_desc_lower = app_desc.lower()
 
 
 
 
 
 
 
 
507
  if any(lib in app_desc_lower for lib in ["scikit-image", "skimage", "cv2", "opencv"]):
508
  reqs_list.extend(["scikit-image", "opencv-python"]) # Note: opencv-python for pip
509
-
 
 
 
 
 
510
 
511
  # Use dict.fromkeys to get unique items while preserving insertion order (Python 3.7+)
512
  reqs_list = list(dict.fromkeys(reqs_list))
 
 
 
513
 
514
  reqs_content = "\n".join(reqs_list) + "\n"
515
 
@@ -800,7 +829,7 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
800
  # Gradio State variables - these persist their values across user interactions (clicks)
801
  hf_profile = gr.State(None)
802
  hf_token = gr.State(None)
803
- # FIX: Initialize gemini_key state from env var on load for robustness
804
  gemini_key = gr.State(os.environ.get("GOOGLE_API_KEY"))
805
  gemini_model = gr.State("gemini-1.5-flash") # Default selected model
806
  repo_id = gr.State(None) # Stores the ID of the created Space
@@ -821,35 +850,18 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
821
  # Hugging Face Login Button
822
  login_btn = gr.LoginButton(variant="huggingface")
823
 
824
- # Initial load event to check login status (if cached) and explicitly set gemini_key state
825
- # Chain events: Show profile -> Set Gemini Key state -> Configure Gemini -> Update Send Button
826
- def initial_setup_on_load(profile, token):
827
- # This function explicitly sets the gemini_key state from the env var
828
- # This runs *after* the profile is potentially loaded, ensuring gemini_key is set
829
- # before configure_gemini and update_send_button_state read it from state.
830
- # It needs to return the values for the next step in the chain.
831
- return profile, token, os.environ.get("GOOGLE_API_KEY")
832
-
833
- ai_builder_tab.load(
834
- initial_setup_on_load,
835
- inputs=[hf_profile, hf_token], # Pass current state values
836
- outputs=[hf_profile, hf_token, gemini_key] # Update state values
837
- ).then( # Chain after initial state setup
838
- configure_gemini,
839
- inputs=[gemini_key, gemini_model],
840
- outputs=[gemini_status] # Update Gemini status text
841
- ).then( # Chain after config status
842
- update_send_button_state,
843
- inputs=[hf_profile, hf_token, gemini_key, gemini_model],
844
- outputs=[send_btn, prereq_status] # Update button interactivity and status text
845
- )
846
-
847
-
848
  # Update status display when login button reports success
849
- login_btn.click(show_profile, outputs=login_status).then(
850
- update_send_button_state, # Recalculate status and button state
 
 
 
 
 
 
 
851
  inputs=[hf_profile, hf_token, gemini_key, gemini_model],
852
- outputs=[send_btn, prereq_status]
853
  )
854
 
855
  gr.Markdown("## Google AI Studio / Gemini")
@@ -893,7 +905,6 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
893
  update_send_button_state, inputs=[hf_profile, hf_token, gemini_key, gemini_model], outputs=[send_btn, prereq_status] # Update button/prereq status
894
  )
895
 
896
-
897
  # New checkbox for optional grounding
898
  use_grounding_checkbox = gr.Checkbox(
899
  label="Enable Grounding with Google Search",
@@ -902,6 +913,7 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
902
  info="Use Google Search results to inform Gemini's response (may improve factuality)."
903
  )
904
  # Link checkbox change to update the state variable
 
905
  use_grounding_checkbox.change(
906
  lambda v: v, inputs=use_grounding_checkbox, outputs=use_grounding_state
907
  )
@@ -934,6 +946,7 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
934
 
935
  # Helper function to control send button interactivity and prerequisite status text
936
  # This function is triggered by changes in login status and Gemini configuration
 
937
  def update_send_button_state(
938
  profile: gr.OAuthProfile | None,
939
  token: gr.OAuthToken | None,
@@ -1005,12 +1018,32 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
1005
  # When the 'repo_id' state variable changes, update the text in repo_id_text
1006
  repo_id.change(lambda r: r if r else "None", inputs=repo_id, outputs=repo_id_text)
1007
 
1008
-
1009
- # Add an initial welcome message to the chatbot when the UI loads
1010
- def greet():
1011
- return [{"role": "assistant", "content": "Welcome! Please log in to Hugging Face and provide your Google AI Studio API key to start building Spaces. Once ready, type 'generate me a gradio app called myapp' or 'create' to begin."}]
1012
-
1013
- ai_builder_tab.load(greet, outputs=chatbot)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1014
 
1015
 
1016
  if __name__ == "__main__":
@@ -1031,4 +1064,5 @@ if __name__ == "__main__":
1031
  os.makedirs(os.environ["GRADIO_TEMP_DIR"], exist_ok=True) # Ensure the directory exists
1032
 
1033
  # Launch the Gradio UI
1034
- # The Gradio launch call blo
 
 
55
  space_sdk=sdk
56
  )
57
  url = f"https://huggingface.co/spaces/{repo_id}"
58
+ iframe = f'<iframe src="{url}" width="100%" height="500px"></iframe>' # Closing tag must be </iframe> with the trailing '>'
59
  return repo_id, iframe
60
  except Exception as e:
61
  raise RuntimeError(f"Failed to create Space `{repo_id}`: {e}")
 
175
  if response.prompt_feedback and response.prompt_feedback.block_reason:
176
  raise RuntimeError(f"Gemini API call blocked: {response.prompt_feedback.block_reason}")
177
  if not response.candidates:
178
+ # Check for safety ratings if no candidates are returned but not blocked
179
+ if response.prompt_feedback and response.prompt_feedback.safety_ratings:
180
+ ratings = "; ".join([f"{r.category}: {r.probability}" for r in response.prompt_feedback.safety_ratings])
181
+ raise RuntimeError(f"Gemini API call returned no candidates. Safety ratings: {ratings}")
182
+ else:
183
+ raise RuntimeError("Gemini API call returned no candidates.")
184
+
185
 
186
  # If response.candidates is not empty, get the text
187
  # Using response.text is a convenient way to get text from the first candidate part
 
218
  return history
219
 
220
  # This is the main generator function for the workflow, triggered by the 'Send' button
221
+ # NOTE: This function MUST accept ALL state variables as inputs that it might need to modify or pass through.
222
+ # It MUST also yield/return ALL state variables in the same order they appear in the `outputs` list of the `.click()` event.
223
  def ai_workflow_chat(
224
  message: str,
225
  history: list[dict],
 
281
 
282
  # Yield immediately to update the chat UI with the user's message
283
  # This provides immediate feedback to the user while the AI processes
284
+ # Ensure all state variables are yielded back in the correct order
285
  yield (history, repo_id, state, updated_preview, updated_run, updated_build,
286
  attempts, app_desc, repo_name, generated_code, use_grounding)
287
 
 
290
 
291
  if state == STATE_IDLE:
292
  # Check prerequisites before starting any workflow actions
293
+ # Use the passed-in state variable values for checks
294
  if not (hf_profile and hf_token):
295
  history = add_bot_message(history, "Workflow paused: Please log in to Hugging Face first.")
296
  # Yield updated history and current state, then exit for this click
 
317
  history = add_bot_message(history, "Workflow reset.")
318
  # Yield updated history and reset state variables to their initial values
319
  yield (history, None, STATE_IDLE, "<p>No Space created yet.</p>", "", "", 0,
320
+ None, None, None, False) # Reset use_grounding to default False, and other states to None/default
321
  # No return needed after yield in this generator pattern; execution for this click ends here.
322
 
323
  elif generate_match:
 
509
  # Logic to determine required packages based on SDK and keywords in the app description
510
  reqs_list = ["gradio"] if space_sdk == "gradio" else ["streamlit"]
511
  # Add essential libraries regardless of description keywords or grounding
512
+ # Be more specific about which packages might be needed
513
+ essential_libs = ["google-generativeai", "huggingface_hub"]
514
+ if gemini_api_key and gemini_model: # Only add if Gemini is actually configured
515
+ reqs_list.extend(essential_libs)
516
+
517
+ # Add common libraries if description suggests they might be needed
518
  if app_desc:
519
  app_desc_lower = app_desc.lower()
520
+ if "requests" in app_desc_lower or "api" in app_desc_lower:
521
+ reqs_list.append("requests")
522
+ # Image processing libraries
523
+ if "image" in app_desc_lower or "upload" in app_desc_lower or "blur" in app_desc_lower or "vision" in app_desc_lower or "photo" in app_desc_lower:
524
+ reqs_list.append("Pillow")
525
+ if "numpy" in app_desc_lower: reqs_list.append("numpy")
526
+ if "pandas" in app_desc_lower or "dataframe" in app_desc_lower: reqs_list.append("pandas")
527
+ # Add scikit-image and opencv if image processing is heavily implied
528
  if any(lib in app_desc_lower for lib in ["scikit-image", "skimage", "cv2", "opencv"]):
529
  reqs_list.extend(["scikit-image", "opencv-python"]) # Note: opencv-python for pip
530
+ # Add transformers if large models are implied
531
+ if any(lib in app_desc_lower for lib in ["transformer", "llama", "mistral", "bert", "gpt2"]):
532
+ reqs_list.append("transformers")
533
+ # Add torch or tensorflow if deep learning frameworks are implied
534
+ if any(lib in app_desc_lower for lib in ["torch", "pytorch", "tensorflow", "keras"]):
535
+ reqs_list.extend(["torch", "tensorflow"]) # Users might need specific versions, but this is a start
536
 
537
  # Use dict.fromkeys to get unique items while preserving insertion order (Python 3.7+)
538
  reqs_list = list(dict.fromkeys(reqs_list))
539
+ # Sort alphabetically for cleaner requirements.txt
540
+ reqs_list.sort()
541
+
542
 
543
  reqs_content = "\n".join(reqs_list) + "\n"
544
 
 
829
  # Gradio State variables - these persist their values across user interactions (clicks)
830
  hf_profile = gr.State(None)
831
  hf_token = gr.State(None)
832
+ # FIX: Initialize gemini_key state from env var on load
833
  gemini_key = gr.State(os.environ.get("GOOGLE_API_KEY"))
834
  gemini_model = gr.State("gemini-1.5-flash") # Default selected model
835
  repo_id = gr.State(None) # Stores the ID of the created Space
 
850
  # Hugging Face Login Button
851
  login_btn = gr.LoginButton(variant="huggingface")
852
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
853
  # Update status display when login button reports success
854
+ # Chain to also update the Send button/prereq status
855
+ login_btn.click(
856
+ # The LoginButton outputs a tuple (OAuthProfile, OAuthToken) on success
857
+ lambda x: (x[0], x[1]),
858
+ inputs=[login_btn],
859
+ outputs=[hf_profile, hf_token] # Update these State variables
860
+ ).then( # Chain the next action after state is updated
861
+ # Pass all relevant state variables to re-evaluate prerequisites
862
+ update_send_button_state,
863
  inputs=[hf_profile, hf_token, gemini_key, gemini_model],
864
+ outputs=[send_btn, prereq_status] # Update button interactivity and status text
865
  )
866
 
867
  gr.Markdown("## Google AI Studio / Gemini")
 
905
  update_send_button_state, inputs=[hf_profile, hf_token, gemini_key, gemini_model], outputs=[send_btn, prereq_status] # Update button/prereq status
906
  )
907
 
 
908
  # New checkbox for optional grounding
909
  use_grounding_checkbox = gr.Checkbox(
910
  label="Enable Grounding with Google Search",
 
913
  info="Use Google Search results to inform Gemini's response (may improve factuality)."
914
  )
915
  # Link checkbox change to update the state variable
916
+ # No need to update send button status here, as grounding is not a core prerequisite
917
  use_grounding_checkbox.change(
918
  lambda v: v, inputs=use_grounding_checkbox, outputs=use_grounding_state
919
  )
 
946
 
947
  # Helper function to control send button interactivity and prerequisite status text
948
  # This function is triggered by changes in login status and Gemini configuration
949
+ # Defined outside Block but used inside event handlers
950
  def update_send_button_state(
951
  profile: gr.OAuthProfile | None,
952
  token: gr.OAuthToken | None,
 
1018
  # When the 'repo_id' state variable changes, update the text in repo_id_text
1019
  repo_id.change(lambda r: r if r else "None", inputs=repo_id, outputs=repo_id_text)
1020
 
1021
+ # --- Initial Load Event Chain (Defined INSIDE gr.Blocks) ---
1022
+ # This chain runs once when the app loads
1023
+ ai_builder_tab.load(
1024
+ # Action 1: Configure Gemini using the initial state values (from env var if set)
1025
+ configure_gemini,
1026
+ inputs=[gemini_key, gemini_model],
1027
+ outputs=[gemini_status] # Update Gemini status display
1028
+ ).then(
1029
+ # Action 2: Update the send button state and prerequisite status based on initial states
1030
+ update_send_button_state,
1031
+ inputs=[hf_profile, hf_token, gemini_key, gemini_model],
1032
+ outputs=[send_btn, prereq_status] # Update button interactivity and status text
1033
+ ).then(
1034
+ # Action 3: Add the initial welcome message to the chatbot
1035
+ greet,
1036
+ inputs=None,
1037
+ outputs=chatbot
1038
+ )
1039
+
1040
+
1041
+ # These helper functions and the main workflow function are correctly defined OUTSIDE the gr.Blocks context
1042
+ # because they operate on the *values* passed to them by Gradio event triggers, not the UI component objects themselves.
1043
+
1044
+ # Add an initial welcome message to the chatbot (defined outside Blocks to be called by load chain)
1045
+ def greet():
1046
+ return [{"role": "assistant", "content": "Welcome! Please log in to Hugging Face and provide your Google AI Studio API key to start building Spaces. Once ready, type 'generate me a gradio app called myapp' or 'create' to begin."}]
1047
 
1048
 
1049
  if __name__ == "__main__":
 
1064
  os.makedirs(os.environ["GRADIO_TEMP_DIR"], exist_ok=True) # Ensure the directory exists
1065
 
1066
  # Launch the Gradio UI
1067
+ # The Gradio launch call blocks execution.
1068
+ ai_builder_tab.launch()