wuhp committed
Commit 28d44a5 · verified · 1 Parent(s): 0c740cf

Update app.py

Files changed (1)
  1. app.py +59 -49
app.py CHANGED
@@ -183,8 +183,8 @@ def ai_workflow_chat(
     app_description_state: str | None,
     repo_name_state: str | None,
     generated_code_state: str | None,
-    *args, # Catch extra positional arguments
-    **kwargs # Catch extra keyword arguments
+    *args,
+    **kwargs
 ) -> tuple[
     list[dict],
     str | None,
@@ -224,15 +224,15 @@ def ai_workflow_chat(
     # --- State Machine Logic ---
 
     if state == STATE_IDLE:
-        # Check prerequisites first within the state logic as well
+        # Check prerequisites within the state logic as well
         if not (hf_profile and hf_token):
             history = add_bot_message(history, "Workflow paused: Please log in to Hugging Face first.")
             yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
-            return # Stop workflow until login
+            return
         if not (gemini_api_key and gemini_model):
             history = add_bot_message(history, "Workflow paused: Please enter your API key and select a Gemini model.")
             yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
-            return # Stop workflow until API key/model set
+            return
 
         # Look for commands
         reset_match = "reset" in message.lower()
@@ -378,7 +378,7 @@ Return **only** the python code block for app.py. Do not include any extra text,
 
         reqs_content = "\n".join(reqs_list) + "\n"
 
-        history = add_bot_message(history, "✅ `requirements.txt` generated. Click 'Send' to generate README.")
+        history = add_bot_message(history, "✅ `requirements.txt` generated. Click 'Send' to upload.")
         state = STATE_UPLOADING_REQUIREMENTS
         generated_code = reqs_content
         yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
@@ -527,7 +527,7 @@ Return **only** the python code block for app.py. Do not include any extra text,
         if not fixed_code_to_upload:
             history = add_bot_message(history, "Internal error: No fixed code available to upload. Resetting.")
             yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
-            return
+            return
 
         history = add_bot_message(history, "☁️ Uploading fixed `app.py`...")
         yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
@@ -583,15 +583,11 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
     ai_builder_tab.load(show_profile, outputs=login_status)
     # Update status on login click
     login_btn.click(show_profile, outputs=login_status)
-    # Store profile and token in state on login click
-    login_btn.click(lambda x: x, inputs=[login_btn], outputs=[hf_profile, hf_token])
 
     gr.Markdown("## Google AI Studio API Key")
     gemini_input = gr.Textbox(label="API Key", type="password", interactive=True)
     gemini_status = gr.Markdown("")
 
-    gemini_input.change(lambda k: k, inputs=gemini_input, outputs=gemini_key)
-
     gr.Markdown("## Gemini Model")
     model_selector = gr.Radio(
         choices=[
@@ -603,24 +599,13 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
         label="Select model",
         interactive=True
     )
-    model_selector.change(lambda m: m, inputs=model_selector, outputs=gemini_model)
 
-    # Configure Gemini status on load and when key/model changes
+    # Configure Gemini status on load
    ai_builder_tab.load(
        configure_gemini,
        inputs=[gemini_key, gemini_model],
        outputs=[gemini_status]
    )
-    gemini_key.change(
-        configure_gemini,
-        inputs=[gemini_key, gemini_model],
-        outputs=[gemini_status]
-    )
-    gemini_model.change(
-        configure_gemini,
-        inputs=[gemini_key, gemini_model],
-        outputs=[gemini_status]
-    )
 
     gr.Markdown("## Space SDK")
     sdk_selector = gr.Radio(choices=["gradio","streamlit"], value="gradio", label="Template SDK", interactive=True)
@@ -630,8 +615,7 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
     status_text = gr.Textbox(label="Current State", value=STATE_IDLE, interactive=False)
     repo_id_text = gr.Textbox(label="Current Space ID", value="None", interactive=False)
 
-    # --- Added Debugging Indicator ---
-    gr.Markdown("## Prerequisite Status")
+    # --- Debugging Indicator ---
     prereq_status = gr.Markdown("Checking...")
 
 
@@ -662,45 +646,71 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
         status_str = ""
         if is_logged_in and is_gemini_ready:
             status_str = "✅ Ready to send commands."
-        elif not is_logged_in:
-            status_str = "⚠️ Please log in to Hugging Face."
-        elif not is_gemini_ready:
-            if not key: status_str += "⚠️ Gemini API key not set. "
-            if not model: status_str += "⚠️ Gemini model not selected."
-            status_str = status_str.strip()
+        else: # Consolidated status checks
+            status_parts = []
+            if not is_logged_in:
+                status_parts.append("⚠️ Not logged in to Hugging Face.")
+            if not key:
+                status_parts.append("⚠️ Gemini API key not set.")
+            if not model:
+                status_parts.append("⚠️ Gemini model not selected.")
+            status_str = " ".join(status_parts)
+            if not status_str: # Should be ready if status_parts is empty
+                status_str = "✅ Ready to send commands."
 
 
         return gr.update(interactive=is_logged_in and is_gemini_ready), status_str
 
+    # --- Implement Chained Events ---
 
-    # Bind update_send_button_state to the *change* of the relevant state variables
-    # This ensures it's called with the correct 4 state values plus any extras
-    hf_profile.change(
-        update_send_button_state,
-        inputs=[hf_profile, hf_token, gemini_key, gemini_model],
-        outputs=[send_btn, prereq_status] # Update button and status indicator
-    )
-    hf_token.change(
+    # 1. Login Button: Update profile/token state, THEN update send button state
+    login_btn.click(
+        lambda x: (x[0], x[1]), # Extract profile and token from LoginButton output
+        inputs=[login_btn],
+        outputs=[hf_profile, hf_token]
+    ).then( # Chain the next action after state is updated
         update_send_button_state,
         inputs=[hf_profile, hf_token, gemini_key, gemini_model],
        outputs=[send_btn, prereq_status]
    )
-    gemini_key.change(
-        update_send_button_state,
-        inputs=[hf_profile, hf_token, gemini_key, gemini_model],
-        outputs=[send_btn, prereq_status]
+
+
+    # 2. Gemini Key Input: Update key state, THEN configure Gemini, THEN update send button state
+    gemini_input.change(
+        lambda k: k, # Update gemini_key state
+        inputs=[gemini_input],
+        outputs=[gemini_key]
+    ).then( # Chain configure_gemini after key is updated
+        configure_gemini,
+        inputs=[gemini_key, gemini_model],
+        outputs=[gemini_status]
+    ).then( # Chain update_send_button_state after config status is updated
+        update_send_button_state,
+        inputs=[hf_profile, hf_token, gemini_key, gemini_model],
+        outputs=[send_btn, prereq_status]
    )
-    gemini_model.change(
-        update_send_button_state,
-        inputs=[hf_profile, hf_token, gemini_key, gemini_model],
-        outputs=[send_btn, prereq_status]
+
+
+    # 3. Gemini Model Selector: Update model state, THEN configure Gemini, THEN update send button state
+    model_selector.change(
+        lambda m: m, # Update gemini_model state
+        inputs=[model_selector],
+        outputs=[gemini_model]
+    ).then( # Chain configure_gemini after model is updated
+        configure_gemini,
+        inputs=[gemini_key, gemini_model],
+        outputs=[gemini_status]
+    ).then( # Chain update_send_button_state after config status is updated
+        update_send_button_state,
+        inputs=[hf_profile, hf_token, gemini_key, gemini_model],
+        outputs=[send_btn, prereq_status]
    )
 
-    # Keep the initial load trigger as well
+    # 4. Initial Load: Update send button state based on initial (or cached) state
    ai_builder_tab.load(
        update_send_button_state,
        inputs=[hf_profile, hf_token, gemini_key, gemini_model],
-        outputs=[send_btn, prereq_status] # Update button and status indicator
+        outputs=[send_btn, prereq_status]
    )
 
 
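For context, the new bindings rely on Gradio event chaining: an event listener such as `.click()` or `.change()` returns a dependency object, and `.then()` registers a follow-up step that runs only after the previous handler has finished, which is why the commit writes each value into state first and only then runs `configure_gemini` and `update_send_button_state`. Below is a minimal, self-contained sketch of that pattern; it is not code from this commit, and the component and handler names (`store_key`, `configure`, `update_button`, `key_state`, etc.) are illustrative placeholders rather than identifiers from app.py.

```python
# Illustrative sketch of Gradio's .change(...).then(...) chaining.
import gradio as gr

def store_key(k):
    # Copy the textbox value into session state.
    return k

def configure(key):
    # Stand-in for a configuration step (e.g. setting up an API client).
    return "✅ Configured." if key else "⚠️ Key missing."

def update_button(key):
    # Enable the action button only once the prerequisite is met.
    ready = bool(key)
    return gr.update(interactive=ready), "Ready." if ready else "Waiting for key."

with gr.Blocks() as demo:
    key_state = gr.State(None)
    key_box = gr.Textbox(label="API Key", type="password")
    config_status = gr.Markdown("")
    prereq_status = gr.Markdown("")
    send_btn = gr.Button("Send", interactive=False)

    # Each .then() step runs only after the previous handler completes,
    # so the state write happens before the steps that read it.
    key_box.change(
        store_key, inputs=key_box, outputs=key_state
    ).then(
        configure, inputs=key_state, outputs=config_status
    ).then(
        update_button, inputs=key_state, outputs=[send_btn, prereq_status]
    )

demo.launch()
```

This mirrors the `gemini_input.change(...).then(configure_gemini, ...).then(update_send_button_state, ...)` chain added above, replacing the previous revision's separate `.change()` listeners on the state components.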