Update app.py
app.py
CHANGED
@@ -10,8 +10,11 @@ from huggingface_hub import (
 )
 from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
 from google import genai
+# Import Content specifically if we were using Option B, but sticking to Option A
+# from google.genai.types import Content
 from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
 
+
 # --- USER INFO & MODEL LISTING ---
 
 def show_profile(profile: gr.OAuthProfile | None) -> str:
@@ -176,31 +179,25 @@ SYSTEM_DEBUG = {
 
 def run_agent(client, model_name, system_prompt_template, user_input_state, config):
     """Helper to run a single agent interaction using the project state as input."""
-    # Format the system prompt using state variables if needed
     try:
-        #
-        # For our current prompts, direct f-string formatting with known keys is fine
+        # Format the system prompt using state variables
         system_prompt = system_prompt_template["content"].format(**user_input_state)
     except KeyError as e:
         print(f"Error formatting system prompt: Missing key {e}. Prompt template: {system_prompt_template['content']}")
         return f"ERROR: Internal agent error - Missing key {e} for prompt formatting.", None
 
-    # Prepare the message content
-    # We structure it clearly for the agent to read.
+    # Prepare the message content by formatting the project state
     user_message_content = "Project State:\n" + json.dumps(user_input_state, indent=2)
 
-
-
-
+    # *** FIX: Combine system prompt and user state into a single string and pass as a list of strings ***
+    # This matches the format expected by generate_content for simple, non-chat inputs.
+    prompt = system_prompt + "\n\n" + user_message_content
 
     try:
-        #
-        # model_to_use = "gemini-1.5-flash-latest" # Example of using a different model
-        model_to_use = model_name # Use the one passed in
-
+        # Pass a list containing the single combined prompt string
         response = client.models.generate_content(
             model=model_to_use,
-            contents=
+            contents=[prompt], # <-- Corrected: Pass a list of strings
             config=config
         )
         response.raise_for_status() # Raise an exception for bad status codes
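The FIX comments in this hunk describe the calling convention: collapse the system prompt and the JSON-dumped project state into one string and hand it to `generate_content` as a one-element `contents` list. As a rough, hedged sketch of that pattern with the `google-genai` client (the client setup, config values, and sample state below are illustrative placeholders, not part of this commit):

import json
from google import genai
from google.genai.types import GenerateContentConfig

# Hypothetical setup: the real app builds its client, config, and state elsewhere.
client = genai.Client(api_key="YOUR_GEMINI_API_KEY")
config = GenerateContentConfig(temperature=0.2)

system_prompt = "You are the project architect. Produce a short build plan."
state = {"requirements": "a small gradio demo", "sdk_choice": "gradio", "files": {}}

# Combine the system prompt and the serialized state into a single string,
# then pass it as a one-element list, as the FIX comment describes.
prompt = system_prompt + "\n\nProject State:\n" + json.dumps(state, indent=2)
response = client.models.generate_content(
    model="gemini-1.5-flash-latest",
    contents=[prompt],
    config=config,
)
print(response.text)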
@@ -212,7 +209,8 @@ def run_agent(client, model_name, system_prompt_template, user_input_state, conf
         # print(response_text) # Careful: can be very long
         print("----------------------")
 
-
+        # Return just the response text. The calling functions manage the project_state history.
+        return response_text.strip()
 
     except Exception as e:
         print(f"Agent call failed: {e}")
@@ -220,13 +218,19 @@ def run_agent(client, model_name, system_prompt_template, user_input_state, conf
         error_details = str(e)
         if hasattr(e, 'response') and e.response is not None:
             try:
-
+                # Check if response has a usable text body or error structure
+                error_json = e.response.json()
+                error_details = json.dumps(error_json, indent=2)
             except:
-
+                try:
+                    error_details = e.response.text # Fallback to raw text
+                except:
+                    pass # Cannot get response text
 
-        return f"ERROR: Agent failed - {error_details}"
+        return f"ERROR: Agent failed - {error_details}" # Indicate failure
 
 # --- AGENT FUNCTIONS (called by Orchestrator) ---
+# These functions now expect only the response text from run_agent
 
 def run_planner(client, project_state, config):
     print("Orchestrator: Running Planner Agent...")
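The nested try/except added in this hunk tries to pull a structured error body off the exception before falling back to raw text. Isolated from the rest of run_agent, the pattern is roughly the following sketch (the exact exception type and whether it carries a `response` attribute depend on the client library):

import json

def extract_error_details(e) -> str:
    # Start from the plain exception text, then try to improve on it.
    details = str(e)
    resp = getattr(e, "response", None)
    if resp is not None:
        try:
            details = json.dumps(resp.json(), indent=2)  # prefer a structured error body
        except Exception:
            try:
                details = resp.text  # fall back to the raw response text
            except Exception:
                pass  # keep str(e)
    return details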
@@ -235,12 +239,11 @@ def run_planner(client, project_state, config):
         "requirements": project_state['requirements'],
         "sdk_choice": project_state['sdk_choice'],
         "main_app_file": project_state['main_app_file'],
-        # Include
-        "files": project_state['files']
+        "files": project_state['files'] # Include existing files
     }
-    response_text
+    response_text = run_agent(
         client=client,
-        model_name="gemini-1.5-flash-latest",
+        model_name="gemini-1.5-flash-latest",
         system_prompt_template=SYSTEM_ARCHITECT,
         user_input_state=input_state_for_planner,
         config=config,
@@ -253,6 +256,8 @@ def run_planner(client, project_state, config):
     project_state['plan'] = response_text
     print("Orchestrator: Planner Output Received.")
     project_state['status_message'] = "Planning complete."
+    # Add plan to chat history for user
+    project_state['chat_history'].append({"role": "assistant", "content": f"**Plan:**\n{project_state['plan']}"})
     return True
 
 def run_codegen(client, project_state, config):
@@ -267,9 +272,9 @@ def run_codegen(client, project_state, config):
         "sdk_choice": project_state['sdk_choice'],
         "main_app_file": project_state['main_app_file'] # Ensure it knows the main file convention
     }
-    response_text
+    response_text = run_agent(
         client=client,
-        model_name="gemini-1.5-flash-latest",
+        model_name="gemini-1.5-flash-latest",
         system_prompt_template=SYSTEM_CODEGEN,
         user_input_state=input_state_for_codegen,
         config=config,
@@ -277,30 +282,30 @@ def run_codegen(client, project_state, config):
 
     if response_text.startswith("ERROR:"):
         project_state['status_message'] = response_text
+        # The error message added here will be processed by the debugger
+        # No need to add to chat history here, debugger feedback will summarize
         return False # Indicate failure
 
     # Parse the response text to extract code blocks for potentially multiple files
     files_updated = {}
     # Regex to find blocks like `filename`\n```[language]\ncode...\n```
-    # It captures the filename (group 1) and the code content (group 2)
-    # Added handling for optional language tag after the triple backticks
     blocks = re.findall(r"(`[^`]+`)\s*```(?:\w*\n)?([\s\S]*?)```", response_text)
 
     if not blocks:
         print("Code-Gen Agent did not output any code blocks in expected format.")
-
-
-
-
+        parse_error_msg = "ERROR: Code-Gen Agent failed to output code blocks in `filename`\\n```code``` format."
+        project_state['status_message'] = parse_error_msg
+        # Add the agent's raw response to feedback for debugging
+        project_state['feedback'] = project_state['feedback'] + "\n\n" + parse_error_msg + "\nRaw Agent Response (no code blocks detected):\n" + response_text[:1000] + "..." # Add truncated raw response
+        project_state['chat_history'].append({"role": "assistant", "content": parse_error_msg + "\nSee Debug Feedback for raw response."})
         return False # Indicate failure
 
     syntax_errors = []
     for filename_match, code_content in blocks:
         filename = filename_match.strip('`').strip()
         if not filename:
-            print(f"Warning: Found a code block but filename was empty: {code_content[:50]}...")
             syntax_errors.append(f"Code block found with empty filename.")
-            continue
+            continue # Skip this block
 
         files_updated[filename] = code_content.strip() # Store updated code
 
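The parsing step above hinges on that regex, which expects each file as a backticked filename followed by a fenced code block (with an optional language tag). A small standalone check of how the pattern splits a typical reply (the sample reply is invented for illustration):

import re

# Invented sample in the `filename` + fenced-code format the Code-Gen agent is asked to emit.
sample_response = """`app.py`
```python
import gradio as gr
gr.Interface(fn=str, inputs="text", outputs="text").launch()
```

`requirements.txt`
```
gradio
```
"""

blocks = re.findall(r"(`[^`]+`)\s*```(?:\w*\n)?([\s\S]*?)```", sample_response)
for filename_match, code_content in blocks:
    print(filename_match.strip('`').strip(), "->", repr(code_content.strip()[:40]))
# Expected: one entry for app.py and one for requirements.txt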
@@ -308,7 +313,7 @@ def run_codegen(client, project_state, config):
         if filename.endswith('.py'):
             try:
                 compile(code_content, filename, "exec")
-                print(f"Syntax check passed for {filename}")
+                # print(f"Syntax check passed for {filename}") # Too verbose
             except SyntaxError as e:
                 syntax_errors.append(f"Syntax Error in {filename}: {e}")
                 print(f"Syntax Error in {filename}: {e}")
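The compile(code_content, filename, "exec") call is a cheap syntax gate: it parses and byte-compiles the generated source without executing it, so malformed Python is caught before it is pushed to the Space. A self-contained illustration (the broken snippet is invented):

broken = "def greet(:\n    return 'hi'"
try:
    compile(broken, "app.py", "exec")
except SyntaxError as e:
    # In the app this message is collected into syntax_errors and fed back to the agents.
    print(f"Syntax Error in app.py: {e}")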
@@ -319,13 +324,17 @@ def run_codegen(client, project_state, config):
 
     if not files_updated:
         print("Code-Gen Agent outputted blocks but couldn't parse any valid filenames.")
-
+        parse_error_msg = "ERROR: Code-Gen Agent outputted blocks but couldn't parse any valid filenames."
+        project_state['status_message'] = parse_error_msg
+        project_state['feedback'] = project_state['feedback'] + "\n\n" + parse_error_msg
+        project_state['chat_history'].append({"role": "assistant", "content": parse_error_msg})
         return False # Indicate failure
 
     if syntax_errors:
         # If syntax errors found, add them to feedback and signal failure for CodeGen step
-
-        project_state['
+        syntax_error_msg = "ERROR: Code-Gen Agent introduced syntax errors."
+        project_state['feedback'] = syntax_error_msg + "\n" + "\n".join(syntax_errors) + "\n\n" + project_state.get('feedback', '') # Prepend errors
+        project_state['status_message'] = syntax_error_msg + " Debugging needed."
         # Add syntax errors to chat history for user visibility
         project_state['chat_history'].append({"role": "assistant", "content": project_state['status_message']})
         project_state['chat_history'].append({"role": "assistant", "content": "Details:\n" + "\n".join(syntax_errors)})
@@ -335,7 +344,7 @@ def run_codegen(client, project_state, config):
     project_state['files'].update(files_updated) # Update existing files or add new ones
     print(f"Orchestrator: Code-Gen Agent updated files: {list(files_updated.keys())}")
 
-    # Add the generated/updated code content to the
+    # Add the generated/updated code content snippet to the chat history for visibility
     code_summary = "\n".join([f"`{fn}`:\n```python\n{code[:500]}{'...' if len(code) > 500 else ''}\n```" for fn, code in files_updated.items()]) # Show snippet
     project_state['chat_history'].append({"role": "assistant", "content": f"**Code Generated/Updated:**\n\n{code_summary}"})
     project_state['status_message'] = f"Code generated/updated: {list(files_updated.keys())}"
@@ -355,9 +364,9 @@ def run_debugger(client, project_state, config):
         "iframe_status": 'Responding OK' if project_state.get('iframe_ok', False) else 'Not responding or check failed.',
         "error_types_found": classify_errors(project_state['logs'].get('build', '') + '\n' + project_state['logs'].get('run', ''))
     }
-    response_text
+    response_text = run_agent(
         client=client,
-        model_name="gemini-1.5-flash-latest",
+        model_name="gemini-1.5-flash-latest",
         system_prompt_template=SYSTEM_DEBUG,
         user_input_state=input_state_for_debugger,
         config=config,
@@ -365,19 +374,20 @@ def run_debugger(client, project_state, config):
 
     if response_text.startswith("ERROR:"):
         project_state['status_message'] = response_text
+        # Add the debugger error to feedback for visibility
+        project_state['feedback'] = project_state.get('feedback', '') + "\n\n" + response_text
+        project_state['chat_history'].append({"role": "assistant", "content": response_text}) # Add error to chat
         return False # Indicate failure
 
     project_state['feedback'] = response_text
     print("Orchestrator: Debug Agent Feedback Received.")
     project_state['status_message'] = "Debug feedback generated."
-    # Add debug feedback to chat history
+    # Add debug feedback to chat history
+    project_state['chat_history'].append({"role": "assistant", "content": f"**Debug Feedback:**\n{project_state['feedback']}"})
     return True
 
 # --- MAIN ORCHESTRATION LOGIC ---
 
-# We use Python functions to represent the state transitions and agent calls
-# This is more deterministic than using an LLM purely for orchestration.
-
 def orchestrate_development(client, project_state, config, oauth_token_token):
     """Manages the overall development workflow."""
 
@@ -385,6 +395,8 @@ def orchestrate_development(client, project_state, config, oauth_token_token):
     if project_state['current_task'] == 'START':
         project_state['current_task'] = 'PLANNING'
         project_state['status_message'] = "Starting project: Initializing and moving to Planning."
+        # Add initial message to chat history
+        project_state['chat_history'].append({"role": "assistant", "content": "Project initialized. Starting development team."})
 
 
     while project_state['status'] == 'In Progress' and project_state['attempt_count'] < 7:
@@ -392,13 +404,10 @@ def orchestrate_development(client, project_state, config, oauth_token_token):
         print(f"Current Task: {project_state['current_task']}")
         current_task = project_state['current_task']
 
-        # Add current task to history for UI visibility
-
-        if not project_state['chat_history'] or (
-            project_state['chat_history']
-            or not project_state['chat_history'][-1].get('content', '').startswith('➡️ Task:')
-        ):
-            project_state['chat_history'].append({"role": "assistant", "content": f"➡️ Task: {current_task}"})
+        # Add current task to history for UI visibility
+        task_message = f"➡️ Task: {current_task}"
+        if not project_state['chat_history'] or project_state['chat_history'][-1].get('content', '').strip() != task_message.strip():
+            project_state['chat_history'].append({"role": "assistant", "content": task_message})
 
 
         step_successful = True # Flag to track if the current step completed without error
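Several spots in this loop append a status line to chat_history only when it differs from the last message, so the chatbot is not spammed with duplicates on repeated iterations. A tiny helper sketching that pattern (the helper name is mine, not part of the commit):

def append_once(chat_history, content, role="assistant"):
    # Hypothetical helper: append a message only if it is not already the last entry.
    if not chat_history or chat_history[-1].get("content", "").strip() != content.strip():
        chat_history.append({"role": role, "content": content})

history = []
append_once(history, "➡️ Task: PLANNING")
append_once(history, "➡️ Task: PLANNING")  # ignored: identical to the last message
print(len(history))  # 1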
@@ -406,14 +415,9 @@ def orchestrate_development(client, project_state, config, oauth_token_token):
         if current_task == 'PLANNING':
             step_successful = run_planner(client, project_state, config)
             if step_successful:
-                # Add plan to chat history for user (done inside run_planner now?) - moved to run_planner
                 project_state['current_task'] = 'CODING - Initial Implementation' # Move to coding after planning
-                # Add plan to chat history if it wasn't added by run_planner (depends on its implementation)
-                if project_state['plan'] and not any("**Plan:**" in msg['content'] for msg in project_state['chat_history']):
-                    project_state['chat_history'].append({"role": "assistant", "content": f"**Plan:**\n{project_state['plan']}"})
-
             else:
-                project_state['current_task'] = 'FAILED' # Planning failed
+                project_state['current_task'] = 'FAILED' # Planning failed, end process
 
 
         elif current_task.startswith('CODING'):
@@ -502,15 +506,15 @@ This is an auto-generated HF Space.
             # Wait a moment for build to start
             time.sleep(5) # Initial wait
             wait_time = 5
-            max_log_wait = 120 # Maximum total time to wait for logs in this step
+            max_log_wait = 120 # Maximum total time to wait for logs in this step
             elapsed_log_wait = 0
             logs_fetched = False
             iframe_checked = False
 
-
-
-
-
+            status_logging_message = "Fetching logs and checking iframe..."
+            if not project_state['chat_history'] or project_state['chat_history'][-1].get('content', '').strip() != status_logging_message.strip():
+                project_state['chat_history'].append({"role": "assistant", "content": status_logging_message})
+            project_state['status_message'] = status_logging_message
 
 
             while elapsed_log_wait < max_log_wait:
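The LOGGING step polls the Space's build and run logs on a fixed interval with an overall deadline (max_log_wait), and only probes the iframe once the logs suggest the app may be up. Roughly, the control flow is the following sketch, with simplified stand-ins for fetch_logs and check_iframe, whose real signatures live elsewhere in app.py:

import time

def poll_until_ready(fetch_logs, check_iframe, wait_time=5, max_log_wait=120):
    # Sketch of the polling loop; returns (build_logs, run_logs, iframe_ok).
    elapsed = 0
    build_logs, run_logs, iframe_ok = "", "", False
    while elapsed < max_log_wait:
        build_logs = fetch_logs("build")
        run_logs = fetch_logs("run")
        # Only check the iframe after a grace period or once the logs look substantial.
        if elapsed > 10 or run_logs or len(build_logs) > 100:
            iframe_ok = check_iframe()
            if iframe_ok:
                break
        time.sleep(wait_time)
        elapsed += wait_time
    return build_logs, run_logs, iframe_ok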
@@ -522,8 +526,7 @@ This is an auto-generated HF Space.
                     logs_fetched = True
 
                     # Only check iframe once logs indicate something might be running, or after a delay
-
-                    if elapsed_log_wait > 10 or len(run_logs) > 0 or len(build_logs) > 100: # Also check if build logs are substantial
+                    if elapsed_log_wait > 10 or len(run_logs) > 0 or len(build_logs) > 100:
                         project_state['iframe_ok'] = check_iframe(project_state['iframe_url'])
                         iframe_checked = True
                     else:
@@ -562,7 +565,7 @@ This is an auto-generated HF Space.
             if logs_fetched or iframe_checked: # Proceed if we got logs OR checked the iframe
                 project_state['status_message'] = "Logs fetched and iframe checked (or timeout reached)."
             else:
-                project_state['status_message'] = "Warning: Could not fetch logs or check iframe status within timeout."
+                project_state['status_message'] = "Warning: Could not fetch logs or check iframe status within timeout."
                 step_successful = False # Indicate that this step didn't fully succeed
 
 
@@ -573,22 +576,13 @@ This is an auto-generated HF Space.
         elif current_task == 'DEBUGGING':
             step_successful = run_debugger(client, project_state, config)
 
-            #
-            project_state['chat_history'].append({"role": "assistant", "content": f"**Debug Feedback:**\n{project_state['feedback']}"})
+            # Debug feedback is added to chat history inside run_debugger now
 
 
             if step_successful:
                 # Analyze feedback to decide next step
                 feedback = project_state['feedback']
                 iframe_ok = project_state.get('iframe_ok', False)
-                # Re-check logs just before making the decision for freshest info if possible
-                # This might add latency, skipping for now, rely on logs fetched in LOGGING step
-                # build_logs = fetch_logs(project_state['repo_id'], "build", oauth_token_token)
-                # run_logs = fetch_logs(project_state['repo_id'], "run", oauth_token_token)
-                # error_types = classify_errors(build_logs + '\n' + run_logs)
-                # project_state['logs']['build'] = build_logs # Update state with freshest logs
-                # project_state['logs']['run'] = run_logs
-
                 error_types = classify_errors(project_state['logs'].get('build', '') + '\n' + project_state['logs'].get('run', ''))
 
 
@@ -598,7 +592,7 @@ This is an auto-generated HF Space.
                 # Decision Logic:
                 # 1. Success? Debugger says clear AND iframe works AND no/minor errors in logs AND run logs have some content
                 is_complete = ("All clear. Project appears complete." in feedback) or \
-                              (iframe_ok and error_types == "none" and "ERROR" not in feedback.upper() and len(project_state['logs'].get('run', '')) > 10)
+                              (iframe_ok and error_types == "none" and "ERROR" not in feedback.upper() and len(project_state['logs'].get('run', '')) > 10)
 
                 if is_complete:
                     project_state['status'] = 'Complete'
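The completion check combines an explicit all-clear from the debugger with three weaker signals: a responding iframe, no classified errors, and at least some runtime log output. Pulled out of the loop, the same predicate reads:

def is_project_complete(feedback: str, iframe_ok: bool, error_types: str, run_logs: str) -> bool:
    # Mirrors the is_complete expression above.
    return ("All clear. Project appears complete." in feedback) or (
        iframe_ok
        and error_types == "none"
        and "ERROR" not in feedback.upper()
        and len(run_logs) > 10
    )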
@@ -621,7 +615,7 @@ This is an auto-generated HF Space.
                 # Debugger failed (e.g. API error)
                 project_state['status'] = 'Failed'
                 project_state['current_task'] = 'FINISHED'
-                # status_message already set by run_debugger
+                # status_message and feedback already set by run_debugger
 
 
         elif current_task == 'FINISHED':
@@ -642,9 +636,6 @@ This is an auto-generated HF Space.
         # the orchestrator logic above should handle transition to FAILED or DEBUGGING.
         # This check acts as a safeguard.
         if not step_successful and project_state['status'] == 'In Progress':
-            # This case might occur if a sub-function returns False but doesn't set status='Failed'
-            # The logic above *should* set status_message and transition away from the failed task,
-            # but this ensures the overall loop exits if something goes wrong.
             print(f"Orchestration step '{current_task}' failed, but status is still 'In Progress'. Forcing Failure.")
             project_state['status'] = 'Failed'
             project_state['status_message'] = project_state.get('status_message', f'An unexpected error caused task failure: {current_task}')
@@ -656,14 +647,12 @@ This is an auto-generated HF Space.
 
     # Final status message if loop exited without explicit FINISHED state
    if project_state['status'] == 'In Progress':
-        # This shouldn't happen with the current logic, but good practice
         project_state['status'] = 'Failed'
         project_state['status_message'] = project_state.get('status_message', 'Orchestration loop exited unexpectedly.')
 
 
     # Add final outcome message to history if not already the last message
     final_outcome_message = f"**Project Outcome:** {project_state['status']} - {project_state['status_message']}"
-    # Prevent adding duplicate messages if they were already added inside the loop
     if not project_state['chat_history'] or project_state['chat_history'][-1].get('content', '').strip() != final_outcome_message.strip():
         project_state['chat_history'].append({"role": "assistant", "content": final_outcome_message})
 
@@ -706,17 +695,12 @@ def handle_user_message(
     # We need to add it to the history list here at the beginning,
     # as Gradio's Chatbot expects the handler to return the *updated* history.
     # Check if the last message is *not* a user message or is empty to avoid duplicates
-    # (Gradio's default Chatbot adds the user message automatically on submit,
-    # but explicit handling here is safer if using user_in separately)
-    # A safer check: If the last message is the exact user_input, assume Gradio added it.
-    # Otherwise, add it.
     if not history or history[-1].get("role") != "user" or history[-1].get("content") != user_input:
-
+        history.append({"role": "user", "content": user_input})
 
 
     if not profile or not oauth_token or not oauth_token.token:
         # Append error message to history for display
-        # Check if error message is already present to avoid spamming
         error_msg = "⚠️ Please log in first via the Hugging Face button."
         if not history or history[-1].get("content") != error_msg:
             history.append({"role":"assistant","content":error_msg})
@@ -851,7 +835,7 @@ with gr.Blocks(title="HF Space Auto‑Builder (Team AI)") as demo:
     preview = gr.HTML("<p>App preview will load here when available.</p>")
 
 
-    # Update the button click handler -
+    # Update the button click handler - inputs now match the handle_user_message signature (7 inputs)
     # Gradio will auto-inject profile and token based on the function signature
     send_btn.click(
         fn=handle_user_message,
@@ -863,12 +847,13 @@ with gr.Blocks(title="HF Space Auto‑Builder (Team AI)") as demo:
             grounding,
             temp,
             max_tokens,
-            #
+            # profile (auto-injected)
+            # oauth_token (auto-injected)
         ],
         outputs=[chatbot, build_box, run_box, preview, project_status_md]
     )
 
-    # Update the submit handler -
+    # Update the submit handler - inputs now match the handle_user_message signature (7 inputs)
     user_in.submit(
         fn=handle_user_message,
         inputs=[
@@ -879,7 +864,8 @@ with gr.Blocks(title="HF Space Auto‑Builder (Team AI)") as demo:
             grounding,
             temp,
             max_tokens,
-
+            # profile (auto-injected)
+            # oauth_token (auto-injected)
         ],
         outputs=[chatbot, build_box, run_box, preview, project_status_md]
     )
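Both handlers above list seven explicit inputs and rely on Gradio filling in the OAuth objects from the handler's type annotations. A minimal, hedged sketch of that wiring, assuming a Space with OAuth enabled and a gr.LoginButton in the UI (the handler body is a placeholder, not the app's real handle_user_message):

import gradio as gr

def handle_user_message(user_input, history,
                        profile: gr.OAuthProfile | None = None,
                        oauth_token: gr.OAuthToken | None = None):
    # profile and oauth_token are not wired as inputs below; on a Space with OAuth,
    # Gradio injects them automatically based on the type annotations.
    who = profile.username if profile else "anonymous"
    history = (history or []) + [{"role": "assistant", "content": f"Hello, {who}!"}]
    return history

with gr.Blocks() as demo:
    gr.LoginButton()
    chatbot = gr.Chatbot(type="messages")
    user_in = gr.Textbox()
    user_in.submit(fn=handle_user_message, inputs=[user_in, chatbot], outputs=[chatbot])

demo.launch()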