Update app.py
app.py
CHANGED
@@ -24,6 +24,10 @@ def list_private_models(
    if not profile or not oauth_token:
        return "Please log in to see your models."
    try:
+        # Check if the token is valid before listing models
+        if not hasattr(oauth_token, 'token') or not oauth_token.token:
+            return "Invalid or missing access token."
+
        models = [
            f"{m.id} ({'private' if m.private else 'public'})"
            for m in list_models(author=profile.username, token=oauth_token.token)
@@ -42,11 +46,6 @@ def get_sdk_version(sdk_choice: str) -> str:
    except importlib.metadata.PackageNotFoundError:
        return "UNKNOWN"

-# This utility is no longer needed as parsing is handled in run_codegen
-# def extract_code(text: str) -> str:
-#     blocks = re.findall(r"```(?:\w*\n)?([\s\S]*?)```", text)
-#     return blocks[-1].strip() if blocks else text.strip()
-
def classify_errors(logs: str) -> str:
    errs = set()
    # Convert logs to lower for case-insensitive matching
@@ -73,6 +72,8 @@ def _get_space_jwt(repo_id: str, token: str) -> str:

def fetch_logs(repo_id: str, level: str, token: str) -> str:
    """Fetches build or run logs from an HF Space."""
+    if not token:
+        return "Login required to fetch logs."
    try:
        jwt = _get_space_jwt(repo_id, token)
        url = f"https://api.hf.space/v1/{repo_id}/logs/{level}"
@@ -208,7 +209,7 @@ def run_agent(client, model_name, system_prompt_template, user_input_state, conf
    # Some models return parts, concatenate them
    response_text = "".join([part.text for part in response.candidates[0].content.parts])

-    print(f"--- Agent Response --- ({
+    print(f"--- Agent Response --- ({model_to_use})")
    # print(response_text) # Careful: can be very long
    print("----------------------")

@@ -289,6 +290,9 @@ def run_codegen(client, project_state, config):
    if not blocks:
        print("Code-Gen Agent did not output any code blocks in expected format.")
        project_state['status_message'] = "ERROR: Code-Gen Agent failed to output code blocks in `filename`\\n```code``` format."
+        # Add the agent's raw response to chat history for debugging if no blocks found
+        project_state['chat_history'].append({"role": "assistant", "content": f"Code-Gen Agent raw response (no code blocks detected):\n\n{response_text}"})
+
        return False # Indicate failure

    syntax_errors = []
@@ -321,8 +325,11 @@ def run_codegen(client, project_state, config):

    if syntax_errors:
        # If syntax errors found, add them to feedback and signal failure for CodeGen step
-        project_state['feedback'] = "Syntax Errors:\n" + "\n".join(syntax_errors) + "\n\n" + project_state['feedback'] # Prepend errors
-        project_state['status_message'] = "ERROR: Code-Gen Agent introduced syntax errors."
+        project_state['feedback'] = "Syntax Errors Found:\n" + "\n".join(syntax_errors) + "\n\n" + project_state['feedback'] # Prepend errors
+        project_state['status_message'] = "ERROR: Code-Gen Agent introduced syntax errors. Debugging needed."
+        # Add syntax errors to chat history for user visibility
+        project_state['chat_history'].append({"role": "assistant", "content": project_state['status_message']})
+        project_state['chat_history'].append({"role": "assistant", "content": "Details:\n" + "\n".join(syntax_errors)})
        return False # Indicate failure due to syntax errors


@@ -330,8 +337,10 @@ def run_codegen(client, project_state, config):
    print(f"Orchestrator: Code-Gen Agent updated files: {list(files_updated.keys())}")

    # Add the generated/updated code content to the status message for visibility in UI
-    code_summary = "\n".join([f"`{fn}`:\n```python\n{code[:
-    project_state['
+    code_summary = "\n".join([f"`{fn}`:\n```python\n{code[:500]}{'...' if len(code) > 500 else ''}\n```" for fn, code in files_updated.items()]) # Show snippet
+    project_state['chat_history'].append({"role": "assistant", "content": f"**Code Generated/Updated:**\n\n{code_summary}"})
+    project_state['status_message'] = f"Code generated/updated: {list(files_updated.keys())}"
+

    return True # Indicate success

@@ -362,6 +371,7 @@ def run_debugger(client, project_state, config):
    project_state['feedback'] = response_text
    print("Orchestrator: Debug Agent Feedback Received.")
    project_state['status_message'] = "Debug feedback generated."
+    # Add debug feedback to chat history (already done in orchestrate_development, but could be here)
    return True

# --- MAIN ORCHESTRATION LOGIC ---
@@ -372,42 +382,63 @@ def run_debugger(client, project_state, config):
def orchestrate_development(client, project_state, config, oauth_token_token):
    """Manages the overall development workflow."""

+    # Initial step transition
+    if project_state['current_task'] == 'START':
+        project_state['current_task'] = 'PLANNING'
+        project_state['status_message'] = "Starting project: Initializing and moving to Planning."
+
+
    while project_state['status'] == 'In Progress' and project_state['attempt_count'] < 7:
        print(f"\n--- Attempt {project_state['attempt_count'] + 1} ---")
        print(f"Current Task: {project_state['current_task']}")
-
        current_task = project_state['current_task']
-        step_successful = True # Flag to track if the current step completed without error
-        # Add current task to history for UI visibility
-        project_state['chat_history'].append({"role": "assistant", "content": f"➡️ Task: {current_task}"})
-        project_state['status_message'] = f"Executing: {current_task}..."

+        # Add current task to history for UI visibility (if not already added by previous step)
+        if not project_state['chat_history'] or project_state['chat_history'][-1].get('content', '').strip() != f"➡️ Task: {current_task}".strip():
+            project_state['chat_history'].append({"role": "assistant", "content": f"➡️ Task: {current_task}"})

-        if
-            # Initial state, move to planning
-            project_state['current_task'] = 'PLANNING'
-            project_state['status_message'] = "Starting project: Initializing."
-
+        step_successful = True # Flag to track if the current step completed without error

-
+        if current_task == 'PLANNING':
            step_successful = run_planner(client, project_state, config)
            if step_successful:
+                # Add plan to chat history for user (done inside run_planner now?) - moved to run_planner
                project_state['current_task'] = 'CODING - Initial Implementation' # Move to coding after planning
-                # Add plan to chat history for user
-                project_state['chat_history'].append({"role": "assistant", "content": f"**Plan:**\n{project_state['plan']}"})
            else:
                project_state['current_task'] = 'FAILED' # Planning failed


        elif current_task.startswith('CODING'):
-            # Ensure the main app file exists before coding if it's the first coding step
-            if project_state['attempt_count'] == 0
+            # Ensure the main app file exists before coding if it's the first coding step (or first after syntax error)
+            if project_state['attempt_count'] == 0 or 'Syntax Errors' in project_state.get('feedback', ''):
                if project_state['main_app_file'] not in project_state['files']:
                    project_state['files'][project_state['main_app_file']] = f"# Initial {project_state['sdk_choice']} app file\n" # Start with a basic stub
                    if project_state['sdk_choice'] == 'gradio':
                        project_state['files'][project_state['main_app_file']] += "import gradio as gr\n\n# Define a simple interface\n# For example: gr.Interface(...).launch()\n"
                    elif project_state['sdk_choice'] == 'streamlit':
                        project_state['files'][project_state['main_app_file']] += "import streamlit as st\n\n# Your Streamlit app starts here\n# For example: st.write('Hello, world!')\n"
+                # Also ensure requirements.txt and README are stubbed if they don't exist
+                if 'requirements.txt' not in project_state['files']:
+                    project_state['files']['requirements.txt'] = "pandas\n" + ("streamlit\n" if project_state['sdk_choice']=="streamlit" else "gradio\n") + "google-generativeai\nhuggingface-hub\n"
+                if 'README.md' not in project_state['files']:
+                    readme_content = f"""---
+title: {project_state['repo_id']}
+emoji: 🐢
+sdk: {project_state['sdk_choice']}
+sdk_version: {project_state['sdk_version']}
+app_file: {project_state['main_app_file']}
+pinned: false
+---
+# {project_state['repo_id']}
+
+This is an auto-generated HF Space.
+
+**Requirements:** {project_state['requirements']}
+
+**Plan:**
+{project_state['plan']}
+"""
+                    project_state['files']['README.md'] = readme_content


            step_successful = run_codegen(client, project_state, config)
@@ -416,11 +447,10 @@ def orchestrate_development(client, project_state, config, oauth_token_token):
                project_state['current_task'] = 'PUSHING' # Always push after attempting to code
            else:
                # Code-gen failed (syntax error, parsing issue, etc.)
+                # The failure is handled within run_codegen by setting status_message and feedback
                # We'll try debugging/coding again in the next attempt loop iteration if attempts allow
-                print("Code-Gen step failed.
-
-                # Add the error message from run_codegen to chat history
-                project_state['chat_history'].append({"role": "assistant", "content": project_state['status_message']})
+                print("Code-Gen step failed.")
+                # attempt_count is incremented AFTER debugging phase analyses results
                project_state['current_task'] = 'DEBUGGING' # Go to debugging to analyze the failure


@@ -442,42 +472,6 @@ def orchestrate_development(client, project_state, config, oauth_token_token):
                    repo_type="space"
                )

-            # Ensure requirements.txt is handled - CodeGen *could* generate it,
-            # but adding boilerplate here guarantees it exists if not generated.
-            if 'requirements.txt' not in project_state['files']:
-                req_content = "pandas\n" + ("streamlit\n" if project_state['sdk_choice']=="streamlit" else "gradio\n") + "google-generativeai\nhuggingface-hub\n"
-                with open("requirements.txt", "w") as f:
-                    f.write(req_content)
-                upload_file(path_or_fileobj="requirements.txt", path_in_repo="requirements.txt",
-                            repo_id=project_state['repo_id'], token=oauth_token_token, repo_type="space")
-                project_state['files']['requirements.txt'] = req_content # Add to state for completeness
-
-            # Ensure README.md is handled
-            if 'README.md' not in project_state['files']:
-                readme_content = f"""---
-title: {project_state['repo_id']}
-emoji: 🐢
-sdk: {project_state['sdk_choice']}
-sdk_version: {project_state['sdk_version']}
-app_file: {project_state['main_app_file']}
-pinned: false
----
-# {project_state['repo_id']}
-
-This is an auto-generated HF Space.
-
-**Requirements:** {project_state['requirements']}
-
-**Plan:**
-{project_state['plan']}
-"""
-                with open("README.md", "w") as f:
-                    f.write(readme_content)
-                upload_file(path_or_fileobj="README.md", path_in_repo="README.md",
-                            repo_id=project_state['repo_id'], token=oauth_token_token, repo_type="space")
-                project_state['files']['README.md'] = readme_content # Add to state
-
-
            print(f"Pushed {len(project_state['files'])} files to {project_state['repo_id']}")
            project_state['status_message'] = f"Pushed code to HF Space **{project_state['repo_id']}**. Waiting for build..."
            project_state['chat_history'].append({"role": "assistant", "content": project_state['status_message']})
@@ -496,9 +490,14 @@ This is an auto-generated HF Space.
            # Wait a moment for build to start
            time.sleep(5) # Initial wait
            wait_time = 5
-            max_log_wait =
+            max_log_wait = 90 # Maximum total time to wait for logs in this step (increased slightly)
            elapsed_log_wait = 0
            logs_fetched = False
+            iframe_checked = False
+
+            project_state['status_message'] = "Fetching logs and checking iframe..."
+            project_state['chat_history'].append({"role": "assistant", "content": project_state['status_message']})
+

            while elapsed_log_wait < max_log_wait:
                try:
@@ -506,44 +505,50 @@ This is an auto-generated HF Space.
                    run_logs = fetch_logs(project_state['repo_id'], "run", oauth_token_token)
                    project_state['logs']['build'] = build_logs
                    project_state['logs']['run'] = run_logs
-                    project_state['iframe_ok'] = check_iframe(project_state['iframe_url'])
-
                    logs_fetched = True
-
-
-
-
-
-
-
-
-
+
+                    # Only check iframe once logs indicate something might be running, or after a delay
+                    if elapsed_log_wait > 10: # Don't check immediately
+                        project_state['iframe_ok'] = check_iframe(project_state['iframe_url'])
+                        iframe_checked = True
+                    else:
+                        project_state['iframe_ok'] = False # Assume not ready yet
+
+
+                    print(f"Log/Iframe check at {elapsed_log_wait}s. Build logs len: {len(build_logs)}, Run logs len: {len(run_logs)}, Iframe OK: {project_state['iframe_ok']}")
+
+                    # Conditions to proceed to debugging:
+                    # 1. Iframe is OK (app is running and accessible)
+                    # 2. Build logs show errors (need debugging ASAP)
+                    # 3. Max wait time is almost reached (proceed with whatever logs we have)
+                    # 4. Build logs exist and indicate *some* progress (e.g., contain "Building" or sufficient length)
+                    if project_state['iframe_ok'] or \
+                       "ERROR" in build_logs.upper() or "FATAL" in build_logs.upper() or \
+                       elapsed_log_wait >= max_log_wait - wait_time or \
+                       ("Building" in build_logs or len(build_logs) > 100) and logs_fetched: # Heuristic for build progress
                        break # Exit the log fetching wait loop
-                    elif "ERROR" in build_logs.upper() or "FATAL" in build_logs.upper():
-                        print("Build errors detected, proceeding to debugging.")
-                        break # Proceed to debugging to analyze errors
                    else:
-                        print(f"Logs
+                        print(f"Logs or iframe not ready. Waiting {wait_time}s...")
                        time.sleep(wait_time)
                        elapsed_log_wait += wait_time
-                        wait_time = min(wait_time * 1.5,
+                        wait_time = min(wait_time * 1.5, 20) # Increase wait time, cap at 20s


                except Exception as e:
                    print(f"Error during log fetching or iframe check: {e}. Will retry.")
                    time.sleep(wait_time)
                    elapsed_log_wait += wait_time
-                    wait_time = min(wait_time * 1.5,
+                    wait_time = min(wait_time * 1.5, 20)


-            if logs_fetched:
-                project_state['status_message'] = "Logs fetched and iframe checked."
+            if logs_fetched or iframe_checked: # Proceed if we got logs OR checked the iframe
+                project_state['status_message'] = "Logs fetched and iframe checked (or timeout reached)."
                project_state['chat_history'].append({"role": "assistant", "content": project_state['status_message']})
                project_state['current_task'] = 'DEBUGGING' # Move to debugging to analyze logs
            else:
                step_successful = False
-                project_state['status'] = 'Failed' # Failed to fetch logs within timeout
-                project_state['status_message'] = "ERROR: Failed to fetch logs or iframe within timeout."
+                project_state['status'] = 'Failed' # Failed to fetch logs/check iframe within timeout after retries
+                project_state['status_message'] = "ERROR: Failed to fetch logs or check iframe within timeout after multiple retries."
                project_state['chat_history'].append({"role": "assistant", "content": project_state['status_message']})
                project_state['current_task'] = 'FINISHED' # End process

@@ -559,8 +564,17 @@ This is an auto-generated HF Space.
            # Analyze feedback to decide next step
            feedback = project_state['feedback']
            iframe_ok = project_state.get('iframe_ok', False)
+            # Re-check logs just before making the decision for freshest info if possible
+            # This might add latency, skipping for now, rely on logs fetched in LOGGING step
+            # build_logs = fetch_logs(project_state['repo_id'], "build", oauth_token_token)
+            # run_logs = fetch_logs(project_state['repo_id'], "run", oauth_token_token)
+            # error_types = classify_errors(build_logs + '\n' + run_logs)
+            # project_state['logs']['build'] = build_logs # Update state with freshest logs
+            # project_state['logs']['run'] = run_logs
+
            error_types = classify_errors(project_state['logs'].get('build', '') + '\n' + project_state['logs'].get('run', ''))

+
            print(f"Debug Analysis - Feedback: {feedback[:100]}... | Iframe OK: {iframe_ok} | Errors: {error_types}")


@@ -570,7 +584,7 @@ This is an auto-generated HF Space.
                project_state['status'] = 'Complete'
                project_state['current_task'] = 'FINISHED'
                project_state['status_message'] = "Debug Agent reports clear. Project appears complete."
-            elif project_state['attempt_count'] >= 6: # Max attempts reached AFTER debugging
+            elif project_state['attempt_count'] >= 6: # Max attempts reached AFTER debugging analysis
                project_state['status'] = 'Failed'
                project_state['current_task'] = 'FINISHED'
                project_state['status_message'] = f"Max attempts ({project_state['attempt_count']+1}/7) reached after debugging. Project failed."
@@ -578,7 +592,7 @@ This is an auto-generated HF Space.
                # Errors or issues found, need more coding/debugging
                project_state['current_task'] = 'CODING - Addressing Feedback'
                project_state['status_message'] = "Debug Agent found issues. Returning to Coding phase to address feedback."
-                project_state['attempt_count'] += 1 # Increment attempt count
+                project_state['attempt_count'] += 1 # Increment attempt count AFTER a debug cycle points back to coding
                backoff_wait = min(project_state['attempt_count'] * 5, 30) # Backoff before next coding attempt
                print(f"Waiting {backoff_wait} seconds before next coding attempt...")
                time.sleep(backoff_wait)
@@ -592,30 +606,49 @@ This is an auto-generated HF Space.

        elif current_task == 'FINISHED':
            # Exit the main loop
-
+            pass # Loop condition handles exit

-
+        else:
+            # Unknown task
+            step_successful = False
+            project_state['status'] = 'Failed'
+            project_state['status_message'] = f"ERROR: Orchestrator entered an unknown task state: {current_task}"
+            project_state['chat_history'].append({"role": "assistant", "content": project_state['status_message']})
+            print(project_state['status_message'])
+            project_state['current_task'] = 'FINISHED' # End process

-
+
+        # If a step failed and didn't explicitly set status to FAILED (like PUSHING),
+        # the orchestrator logic above should handle transition to FAILED or DEBUGGING.
+        # This check acts as a safeguard.
        if not step_successful and project_state['status'] == 'In Progress':
            project_state['status'] = 'Failed'
-            project_state['status_message'] = project_state.get('status_message', 'An
+            project_state['status_message'] = project_state.get('status_message', f'An error occurred during task: {current_task}')
            project_state['current_task'] = 'FINISHED' # End process

+
    # --- End of Orchestration Loop ---

    # Final status message if loop exited without explicit FINISHED state
    if project_state['status'] == 'In Progress':
+        # This shouldn't happen with the current logic, but good practice
        project_state['status'] = 'Failed'
        project_state['status_message'] = project_state.get('status_message', 'Orchestration loop exited unexpectedly.')


-    # Add final outcome message to history
-
+    # Add final outcome message to history if not already the last message
+    final_outcome_message = f"**Project Outcome:** {project_state['status']} - {project_state['status_message']}"
+    if not project_state['chat_history'] or project_state['chat_history'][-1].get('content', '').strip() != final_outcome_message.strip():
+        project_state['chat_history'].append({"role": "assistant", "content": final_outcome_message})
+
    if project_state['status'] == 'Complete':
-
-
-
+        completion_message = "✅ Application deployed successfully (likely)! Check the preview above."
+        if not project_state['chat_history'] or project_state['chat_history'][-1].get('content', '').strip() != completion_message.strip():
+            project_state['chat_history'].append({"role": "assistant", "content": completion_message})
+    elif project_state['status'] == 'Failed':
+        failure_message = "❌ Project failed to complete. Review logs and feedback for details."
+        if not project_state['chat_history'] or project_state['chat_history'][-1].get('content', '').strip() != failure_message.strip():
+            project_state['chat_history'].append({"role": "assistant", "content": failure_message})


    # Return final state for UI update
@@ -624,7 +657,7 @@ This is an auto-generated HF Space.
        project_state['logs'].get('build', 'No build logs.'),
        project_state['logs'].get('run', 'No run logs.'),
        (f'<iframe src="{project_state["iframe_url"]}" width="100%" height="500px"></iframe>'
-         + ("" if project_state.get('iframe_ok') else "<p style='color:red;'>⚠️ iframe not responding or
+         + ("" if project_state.get('iframe_ok') else "<p style='color:red;'>⚠️ iframe not responding or check failed.</p>")),
        project_state['status_message'] # Return the final status message
    )

@@ -641,9 +674,21 @@ def handle_user_message(
    profile: gr.OAuthProfile | None,
    oauth_token: gr.OAuthToken | None # We need the token object
):
-
+    # Append the user's message to the history immediately for display
+    # Gradio's Chatbot usually handles this automatically, but explicitly ensures it's present
+    # before we start the orchestration.
+    # Check if the last message is the current user input to avoid duplication on submit/click
+    if not history or history[-1].get("content") != user_in.value: # Assuming user_in is accessible or passed
+        # This logic is tricky with Gradio's submit button.
+        # The default Chatbot behavior on submit/click is to add the user message.
+        # So, history *should* already contain the new user message.
+        # Let's rely on Gradio's standard behavior.
+        pass
+
+    if not profile or not oauth_token or not oauth_token.token:
        # Append error message to history for display
-        error_history = history + [{"role":"assistant","content":"⚠️ Please log in first."}]
+        error_history = history + [{"role":"assistant","content":"⚠️ Please log in first via the Hugging Face button."}]
+        # Return current state, logs etc. + the new history
        return error_history, "", "", "<p>Please log in.</p>", "Login required."

    if not gemini_api_key:
@@ -658,13 +703,13 @@ def handle_user_message(
    code_fn = "app.py" if sdk_choice == "gradio" else "streamlit_app.py" # Standard main file name convention

    # Get the user's latest prompt from the history
+    # Assume the last message in history is the user's new prompt because Gradio added it
    user_prompt = history[-1]['content'] if history and history[-1].get("role") == "user" else "No prompt provided."
-
-
-
-
-
-            break
+
+    if user_prompt == "No prompt provided." or user_prompt.strip() == "":
+        # Handle empty prompt case
+        empty_prompt_history = history + [{"role":"assistant","content":"Please enter requirements for the application."}]
+        return empty_prompt_history, "", "", "<p>Enter requirements.</p>", "Waiting for prompt."


    # Initialize project state for this development session
@@ -675,7 +720,7 @@ def handle_user_message(
        'files': {}, # Use a dict to store multiple file contents {filename: code}
        'logs': {'build': '', 'run': ''},
        'feedback': '',
-        'current_task': 'START',
+        'current_task': 'START', # Start the orchestration state machine
        'status': 'In Progress',
        'status_message': 'Initializing...',
        'attempt_count': 0,
@@ -696,7 +741,7 @@ def handle_user_message(

    # Start the orchestration process
    final_history, final_build_logs, final_run_logs, final_iframe_html, final_status_message = orchestrate_development(
-        client, project_state, cfg, oauth_token.token
+        client, project_state, cfg, oauth_token.token # Pass the token string
    )

    # Return the final state for the UI
@@ -717,14 +762,29 @@ with gr.Blocks(title="HF Space Auto‑Builder (Team AI)") as demo:

    with gr.Row():
        with gr.Column(scale=1):
+            # --- LOGIN BUTTON / PROFILE & MODEL LISTING (FIXED) ---
            login_btn = gr.LoginButton(variant="huggingface", size="lg")
            status_md = gr.Markdown("*Not logged in.*")
            models_md = gr.Markdown()
-
+
+            # On app load, show “not logged in” and list public models (or none)
            demo.load(show_profile, inputs=None, outputs=status_md, api_name="load_profile")
-            demo.load(list_private_models, inputs=
-
-
+            demo.load(list_private_models, inputs=None, outputs=models_md, api_name="load_models") # Inputs=None allows Gradio to inject profile/token (will be None initially)
+
+            # When the user actually logs in:
+            login_btn.click(
+                fn=show_profile,
+                inputs=None, # Gradio will inject OAuthProfile
+                outputs=status_md,
+                api_name="login_profile"
+            )
+            login_btn.click(
+                fn=list_private_models,
+                inputs=None, # Gradio will inject (OAuthProfile, OAuthToken)
+                outputs=models_md,
+                api_name="login_models"
+            )
+            # --- END FIX ---


    gr.Markdown("---")
@@ -745,38 +805,41 @@ with gr.Blocks(title="HF Space Auto‑Builder (Team AI)") as demo:
    with gr.Accordion("Logs", open=False):
        build_box = gr.Textbox(label="Build logs", lines=10, interactive=False, max_lines=20)
        run_box = gr.Textbox(label="Run logs", lines=10, interactive=False, max_lines=20)
-        # Need login state for refresh button
+        # Need login state for refresh button, inputs=None will inject profile/token
        refresh_btn = gr.Button("🔄 Refresh Logs Only")

    with gr.Accordion("App Preview", open=True):
        preview = gr.HTML("<p>App preview will load here when available.</p>")

+
    # Update the button click handler
    # It will now return the updated chatbot history, logs, preview, and the project status
+    # Inputs=None allows Gradio to inject profile/token from login_btn
    send_btn.click(
        fn=handle_user_message,
-        inputs=[chatbot, sdk_choice, api_key, grounding, temp, max_tokens, login_btn, login_btn], #
+        inputs=[chatbot, sdk_choice, api_key, grounding, temp, max_tokens, login_btn, login_btn], # Keep login_btn twice for profile and token objects
        outputs=[chatbot, build_box, run_box, preview, project_status_md]
    )
    user_in.submit(
        fn=handle_user_message,
-        inputs=[chatbot, sdk_choice, api_key, grounding, temp, max_tokens, login_btn, login_btn], #
+        inputs=[chatbot, sdk_choice, api_key, grounding, temp, max_tokens, login_btn, login_btn], # Keep login_btn twice
        outputs=[chatbot, build_box, run_box, preview, project_status_md]
    )

    # Handler for refreshing logs manually
-    #
+    # Inputs=None allows Gradio to inject profile/token from login_btn
    refresh_btn.click(
        fn=lambda profile, token: (
-            fetch_logs(f"{profile.username}/{profile.username}-auto-space", "build", token.token) if profile and token else "Login required to fetch logs.",
-            fetch_logs(f"{profile.username}/{profile.username}-auto-space", "run", token.token) if profile and token else "Login required to fetch logs."
+            fetch_logs(f"{profile.username}/{profile.username}-auto-space", "build", token.token) if profile and token and token.token else "Login required to fetch logs.",
+            fetch_logs(f"{profile.username}/{profile.username}-auto-space", "run", token.token) if profile and token and token.token else "Login required to fetch logs."
        ),
-        inputs=[login_btn, login_btn], #
+        inputs=[login_btn, login_btn], # Keep login_btn twice for profile and token objects
        outputs=[build_box, run_box]
    )


    # Clean up files created during the process when the app stops (optional, good for Spaces)
+    # Consider adding more specific cleanup if needed
    # demo.on_event("close", lambda: [os.remove(f) for f in os.listdir() if os.path.isfile(f) and (f.endswith(".py") or f.endswith(".txt") or f.endswith(".md"))]) # Be careful with this in production

demo.launch(server_name="0.0.0.0", server_port=7860)