Update app.py
app.py CHANGED
@@ -79,8 +79,7 @@ def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
     jwt = r.json()["token"]
     logs_url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
     lines, count = [], 0
-
-    with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True, timeout=30) as resp: # Increased timeout
+    with get_session().get(logs_url, headers=build_hf_headers(token=jwt), stream=True, timeout=30) as resp:
         hf_raise_for_status(resp)
         for raw in resp.iter_lines():
             if count >= 200:
@@ -106,16 +105,13 @@ def _fetch_space_logs_level(repo_id: str, level: str, token: str) -> str:
 def get_build_logs_action(repo_id, profile, token):
     if not (repo_id and profile and token):
         return "⚠️ Cannot fetch build logs: log in and create a Space first."
-
-    # The space needs a moment to detect file changes and start building
-    time.sleep(5) # Added delay
+    time.sleep(5)
     return _fetch_space_logs_level(repo_id, "build", token.token)

 def get_container_logs_action(repo_id, profile, token):
     if not (repo_id and profile and token):
         return "⚠️ Cannot fetch container logs: log in and create a Space first."
-
-    time.sleep(10) # Increased delay, build can take a while
+    time.sleep(10)
     return _fetch_space_logs_level(repo_id, "run", token.token)

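Reviewer note: the fixed sleeps are a guess at build latency. If they prove flaky, an alternative (a sketch, not part of this commit; it assumes huggingface_hub's HfApi.get_space_runtime and stage values such as "BUILDING" and "APP_STARTING") is to poll until the Space leaves the build phase:

# Sketch of polling the Space runtime stage instead of a fixed sleep.
import time
from huggingface_hub import HfApi

def wait_for_space(repo_id: str, token: str, timeout_s: int = 120) -> str:
    api = HfApi(token=token)
    deadline = time.time() + timeout_s
    stage = api.get_space_runtime(repo_id).stage
    while stage in ("BUILDING", "APP_STARTING") and time.time() < deadline:
        time.sleep(5)  # re-check every few seconds
        stage = api.get_space_runtime(repo_id).stage
    return stage  # caller decides how to treat "RUNNING", "RUNTIME_ERROR", etc.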
@@ -165,10 +161,8 @@ STATE_COMPLETE = "complete"

 MAX_DEBUG_ATTEMPTS = 3

-# Modified to add a new assistant message
 def add_bot_message(history: list[dict], bot_message: str) -> list[dict]:
     """Helper to add a new assistant message to the history."""
-    # Chatbot(type='messages') expects [{'role': ..., 'content': ...}]
     history.append({"role": "assistant", "content": bot_message})
     return history

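Reviewer note: for anyone verifying the history format this helper appends to (the type='messages' structure used by the Chatbot further down), a quick usage example:

# Quick usage example: history entries are plain dicts with 'role' and 'content'.
history: list[dict] = [{"role": "user", "content": "hi"}]
history = add_bot_message(history, "Hello! Ready to build a Space?")
# history is now:
# [{'role': 'user', 'content': 'hi'},
#  {'role': 'assistant', 'content': 'Hello! Ready to build a Space?'}]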
@@ -209,19 +203,19 @@ def ai_workflow_chat(
     repo_name = repo_name_state
     generated_code = generated_code_state

+
     updated_preview = preview_html
     updated_build = build_logs
     updated_run = container_logs

     # Add user message to history in the new format
     user_message_entry = {"role": "user", "content": message}
-    # Optional: add user name if logged in
     if hf_profile and hf_profile.username:
         user_message_entry["name"] = hf_profile.username
     history.append(user_message_entry)

-    # Yield immediately to show user message
-
+    # Yield immediately to show user message - this is the first yield
+    yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code


     try:
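Reviewer note: the new early yield leans on Gradio's generator event handlers, where each yield must supply values matching the event's outputs and each one refreshes the UI. A standalone sketch of the pattern (hypothetical names):

# Minimal sketch of a Gradio generator handler: each yield pushes an
# intermediate update to the UI before the slow work finishes.
import time
import gradio as gr

def slow_echo(message: str, history: list[dict]):
    history = history + [{"role": "user", "content": message}]
    yield history  # show the user's message immediately
    time.sleep(1)  # stand-in for slow work (API call, build, ...)
    history = history + [{"role": "assistant", "content": f"echo: {message}"}]
    yield history  # then show the reply

with gr.Blocks() as demo:
    chat = gr.Chatbot(type="messages")
    box = gr.Textbox()
    box.submit(slow_echo, inputs=[box, chat], outputs=chat)

if __name__ == "__main__":
    demo.launch()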
@@ -245,7 +239,6 @@ def ai_workflow_chat(

         if reset_match:
             history = add_bot_message(history, "Workflow reset.")
-            # Reset all state variables
             yield history, None, STATE_IDLE, "<p>No Space created yet.</p>", "", "", 0, None, None, None
             return

@@ -366,21 +359,20 @@ Return **only** the python code block for app.py. Do not include any extra text,
             yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code

             reqs_list = ["gradio"] if space_sdk == "gradio" else ["streamlit"]
-            if "google.generativeai" in str(app_desc).lower() or "gemini" in str(app_desc).lower() or gemini_api_key:
+            if "google.generativeai" in str(app_desc).lower() or "gemini" in str(app_desc).lower() or gemini_api_key:
                 reqs_list.append("google-generativeai")
-            if "requests" in str(app_desc).lower():
+            if "requests" in str(app_desc).lower():
                 reqs_list.append("requests")
             reqs_list.append("huggingface_hub")
-            if "image" in str(app_desc).lower() or "upload" in str(app_desc).lower() or "blur" in str(app_desc).lower():
+            if "image" in str(app_desc).lower() or "upload" in str(app_desc).lower() or "blur" in str(app_desc).lower() or "vision" in str(app_desc).lower(): # Added vision
                 reqs_list.append("Pillow")
-
-            # Add any other common libraries the LLM might generate code for
             if "numpy" in str(app_desc).lower(): reqs_list.append("numpy")
             if "pandas" in str(app_desc).lower(): reqs_list.append("pandas")
-            #
+            if "scikit-image" in str(app_desc).lower() or "skimage" in str(app_desc).lower() or "cv2" in str(app_desc).lower() or "opencv-python" in str(app_desc).lower(): # Common image processing deps
+                reqs_list.append("scikit-image") # skimage
+                reqs_list.append("opencv-python") # cv2

-            # Remove duplicates
-            reqs_list = list(dict.fromkeys(reqs_list))
+            reqs_list = list(dict.fromkeys(reqs_list)) # Remove duplicates

             reqs_content = "\n".join(reqs_list) + "\n"

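Reviewer note: the keyword checks work but will keep growing with each new library; a table-driven sketch of the same idea (an alternative, not what this commit does) keeps the mapping in one place:

# Sketch of a table-driven alternative to the keyword checks above.
KEYWORD_TO_REQS = {
    ("gemini", "google.generativeai"): ["google-generativeai"],
    ("requests",): ["requests"],
    ("image", "upload", "blur", "vision"): ["Pillow"],
    ("numpy",): ["numpy"],
    ("pandas",): ["pandas"],
    ("scikit-image", "skimage", "cv2", "opencv-python"): ["scikit-image", "opencv-python"],
}

def infer_requirements(app_desc: str, base: list[str]) -> list[str]:
    desc = str(app_desc).lower()
    reqs = list(base)
    for keywords, packages in KEYWORD_TO_REQS.items():
        if any(k in desc for k in keywords):
            reqs.extend(packages)
    return list(dict.fromkeys(reqs))  # preserve order, drop duplicates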
@@ -459,7 +451,7 @@ Return **only** the python code block for app.py. Do not include any extra text,
             build_logs_text = get_build_logs_action(repo_id, hf_profile, hf_token)
             updated_build = build_logs_text

-            if "Error" in updated_build or "Exception" in updated_build or "Build failed" in updated_build:
+            if "Error" in updated_build or "Exception" in updated_build or "Build failed" in updated_build:
                 history = add_bot_message(history, "⚠️ Build logs indicate potential issues. Please inspect above. Click 'Send' to check container logs (app might still start).")
                 state = STATE_CHECKING_LOGS_RUN
                 yield history, repo_id, state, updated_preview, updated_run, updated_build, attempts, app_desc, repo_name, generated_code
@@ -551,27 +543,26 @@ Return **only** the python code block for app.py. Do not include any extra text,
             yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None

         elif state == STATE_COMPLETE:
-            # Workflow finished
-            #
-            pass
+            # Workflow finished. Stay in this state until reset.
+            # The message is set in the state that transitioned to COMPLETE.
+            pass # No further action needed in this state


     except Exception as e:
+        # Catch-all for unexpected exceptions in any state
         error_message = f"Workflow step failed unexpectedly ({state}): {e}. Click 'Send' to re-attempt this step or 'reset'."
         history = add_bot_message(history, error_message)
         print(f"Critical Error in state {state}: {e}")
+        # Transition to idle state on unexpected errors
         yield history, None, STATE_IDLE, updated_preview, updated_run, updated_build, 0, None, None, None
-        return # End generator
+        return # End generator


-    #
-    #
-    #
-    #
-    #
-    # Let's ensure each path explicitly yields the updated state.
-    # Removed the final implicit yield here, as each state handler should yield.
-    pass
+    # Note: Each state block should end with a yield to update the UI/state.
+    # If a state block finishes without yielding, the generator will stop until the next call.
+    # By yielding the state variables *after* adding the user message, and then yielding
+    # again after state transitions/actions *within* the state logic, we ensure updates.
+    # The final yield after the try/except is removed as yields should happen within the state logic.


 # --- Build the Gradio UI ---
@@ -581,14 +572,14 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
     hf_profile = gr.State(None)
     hf_token = gr.State(None)
     gemini_key = gr.State(None)
-    gemini_model = gr.State("gemini-1.5-flash")
+    gemini_model = gr.State("gemini-1.5-flash")
     repo_id = gr.State(None)
     workflow = gr.State(STATE_IDLE)
     sdk_state = gr.State("gradio")
     debug_attempts = gr.State(0)
     app_description = gr.State(None)
     repo_name_state = gr.State(None)
-    generated_code_state = gr.State(None) # Reused
+    generated_code_state = gr.State(None) # Reused

     with gr.Row():
         # Sidebar
@@ -598,11 +589,11 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
             login_btn = gr.LoginButton(variant="huggingface")

             # Initial load to check login status
+            # Removed _preprocess=False
             ai_builder_tab.load(show_profile, outputs=login_status)
             # Update status on login click
             login_btn.click(show_profile, outputs=login_status)
             # Store profile and token in state on login click
-            # This lambda takes the output of the LoginButton (profile, token) as 'x' and returns it
             login_btn.click(lambda x: x, inputs=[login_btn], outputs=[hf_profile, hf_token])

             gr.Markdown("## Google AI Studio API Key")
@@ -614,12 +605,9 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
             gr.Markdown("## Gemini Model")
             model_selector = gr.Radio(
                 choices=[
-                    ("Gemini 1.5 Flash", "gemini-1.5-flash"),
+                    ("Gemini 1.5 Flash", "gemini-1.5-flash"),
                     ("Gemini 1.5 Pro", "gemini-1.5-pro"),
                     ("Gemini 1.0 Pro", "gemini-1.0-pro"),
-                    # Remove preview models as they can be unstable
-                    # ("Gemini 2.5 Flash Preview 04-17", "gemini-2.5-flash-preview-04-17"),
-                    # ("Gemini 2.5 Pro Preview 03-25", "gemini-2.5-pro-preview-03-25"),
                 ],
                 value="gemini-1.5-flash",
                 label="Select model",
@@ -628,13 +616,13 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
             model_selector.change(lambda m: m, inputs=model_selector, outputs=gemini_model)

             # Configure Gemini status on load and when key/model changes
-            #
+            # Removed _preprocess=False from load
             ai_builder_tab.load(
                 configure_gemini,
                 inputs=[gemini_key, gemini_model],
-                outputs=[gemini_status]
-                _preprocess=False # Tell Gradio not to pass implicit event output
+                outputs=[gemini_status]
             )
+            # Keep _preprocess=False on change events as they have implicit outputs (the new value)
             gemini_input.change(
                 configure_gemini,
                 inputs=[gemini_key, gemini_model],
@@ -659,29 +647,28 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:

         # Main content
         with gr.Column(scale=3):
-
-            chatbot = gr.Chatbot(type='messages', label="AI Workflow Chat") # Added type='messages' and label
+            chatbot = gr.Chatbot(type='messages', label="AI Workflow Chat")
             user_input = gr.Textbox(placeholder="Type your message…", interactive=True)
             send_btn = gr.Button("Send", interactive=False)

             # Logic to enable send button only when logged in and API key is set
-            # Added _preprocess=False to avoid implicit arguments causing TypeErrors
             def update_send_button_state(profile: gr.OAuthProfile | None, token: gr.OAuthToken | None, key: str | None, model: str | None):
                 is_logged_in = profile is not None and token is not None
                 is_gemini_ready = key is not None and model is not None
                 return gr.update(interactive=is_logged_in and is_gemini_ready)

+            # Removed _preprocess=False from load
             ai_builder_tab.load(
                 update_send_button_state,
                 inputs=[hf_profile, hf_token, gemini_key, gemini_model],
-                outputs=[send_btn]
-                _preprocess=False # Tell Gradio not to pass implicit event output
+                outputs=[send_btn]
             )
+            # Keep _preprocess=False on click/change events with explicit inputs
             login_btn.click(
                 update_send_button_state,
                 inputs=[hf_profile, hf_token, gemini_key, gemini_model],
                 outputs=[send_btn],
-                _preprocess=False
+                _preprocess=False
             )
             gemini_input.change(
                 update_send_button_state,
@@ -729,11 +716,10 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:


     # Add an initial message to the chatbot on load
-    # THIS CALL IS NOW INSIDE the with gr.Blocks() block
     def greet():
-        # Return a list containing a single assistant message dictionary
         return [{"role": "assistant", "content": "Welcome! Please log in to Hugging Face and provide your Google AI Studio API key to start building Spaces. Once ready, type 'generate me a gradio app called myapp' or 'create' to begin."}]

+    # THIS CALL IS INSIDE the with gr.Blocks() block
     ai_builder_tab.load(greet, outputs=chatbot)

@@ -741,7 +727,7 @@ if __name__ == "__main__":
     # Optional: Configure retries for huggingface_hub requests
     # from requests.adapters import HTTPAdapter
    # from urllib3.util.retry import Retry
-    # retry_strategy = Retry(total=5, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
+    # retry_strategy = Retry(total=5, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
     # adapter = HTTPAdapter(max_retries=retry_strategy)
     # get_session().mount("http://", adapter)
     # get_session().mount("https://", adapter)
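Reviewer note: the commented retry setup is complete enough to enable verbatim; spelled out against the same shared session the log fetcher already uses:

# The commented-out retry setup above, spelled out. Mounting the adapter on
# huggingface_hub's shared session makes Hub requests retry transient errors.
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from huggingface_hub.utils import get_session

retry_strategy = Retry(total=5, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
adapter = HTTPAdapter(max_retries=retry_strategy)
get_session().mount("http://", adapter)
get_session().mount("https://", adapter)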