wuhp committed on
Commit
ad264a2
·
verified ·
1 Parent(s): d98bef8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -13
app.py CHANGED
@@ -10,8 +10,8 @@ from huggingface_hub import (
10
  )
11
  from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
12
  from google import genai
13
- # Import Content type for structured multi-turn/multi-role inputs
14
- from google.genai.types import Content
15
  from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
16
 
17
 
@@ -193,10 +193,10 @@ def run_agent(client, model_name, system_prompt_template, user_input_state, conf
193
  model_to_use = model_name
194
 
195
  try:
196
- # *** FIX: Use Content objects for system and user roles ***
197
  messages = [
198
- Content(role="system", parts=[system_prompt]), # System instruction as a part
199
- Content(role="user", parts=[user_message_content]) # User state as a part
200
  ]
201
 
202
  response = client.models.generate_content(
@@ -210,8 +210,14 @@ def run_agent(client, model_name, system_prompt_template, user_input_state, conf
210
  # Ensure candidate and content exist before accessing parts
211
  if not response.candidates or not response.candidates[0].content:
212
  print("Agent returned no candidates or empty content.")
 
 
 
 
 
213
  return f"ERROR: Agent returned no response content."
214
 
 
215
  response_text = "".join([part.text for part in response.candidates[0].content.parts])
216
 
217
  print(f"--- Agent Response --- ({model_to_use})")
@@ -252,7 +258,7 @@ def run_planner(client, project_state, config):
252
  }
253
  response_text = run_agent(
254
  client=client,
255
- model_name="gemini-2.5-flash-preview-04-17", # *** FIX: Use the specific model name ***
256
  system_prompt_template=SYSTEM_ARCHITECT,
257
  user_input_state=input_state_for_planner,
258
  config=config,
@@ -283,7 +289,7 @@ def run_codegen(client, project_state, config):
283
  }
284
  response_text = run_agent(
285
  client=client,
286
- model_name="gemini-2.5-flash-preview-04-17", # *** FIX: Use the specific model name ***
287
  system_prompt_template=SYSTEM_CODEGEN,
288
  user_input_state=input_state_for_codegen,
289
  config=config,
@@ -375,7 +381,7 @@ def run_debugger(client, project_state, config):
375
  }
376
  response_text = run_agent(
377
  client=client,
378
- model_name="gemini-2.5-flash-preview-04-17", # *** FIX: Use the specific model name ***
379
  system_prompt_template=SYSTEM_DEBUG,
380
  user_input_state=input_state_for_debugger,
381
  config=config,
@@ -435,21 +441,26 @@ def orchestrate_development(client, project_state, config, oauth_token_token):
435
 
436
  elif current_task.startswith('CODING'):
437
  # Ensure minimum files exist before asking CodeGen to code
438
- # This happens once at the start of the first coding task
439
- if project_state['attempt_count'] == 0 and project_state['current_task'] == 'CODING - Initial Implementation':
440
- # Add initial stub files if they don't exist
441
- if project_state['main_app_file'] not in project_state['files']:
 
442
  project_state['files'][project_state['main_app_file']] = f"# Initial {project_state['sdk_choice']} app file\n" # Start with a basic stub
443
  if project_state['sdk_choice'] == 'gradio':
444
  project_state['files'][project_state['main_app_file']] += "import gradio as gr\n\n# Define a simple interface\n# For example: gr.Interface(...).launch()\n"
445
  elif project_state['sdk_choice'] == 'streamlit':
446
  project_state['files'][project_state['main_app_file']] += "import streamlit as st\n\n# Your Streamlit app starts here\n# For example: st.write('Hello, world!')\n"
 
 
447
 
448
  if 'requirements.txt' not in project_state['files']:
 
449
  req_content = "pandas\n" + ("streamlit\n" if project_state['sdk_choice']=="streamlit" else "gradio\n") + "google-generativeai\nhuggingface-hub\n"
450
  project_state['files']['requirements.txt'] = req_content
451
 
452
  if 'README.md' not in project_state['files']:
 
453
  readme_content = f"""---
454
  title: {project_state['repo_id']}
455
  emoji: 🐢
@@ -519,7 +530,7 @@ This is an auto-generated HF Space.
519
  # Wait a moment for build to start
520
  time.sleep(5) # Initial wait
521
  wait_time = 5
522
- max_log_wait = 120 # Maximum total time to wait for logs in this step
523
  elapsed_log_wait = 0
524
  logs_fetched = False
525
  iframe_checked = False
 
10
  )
11
  from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
12
  from google import genai
13
+ # Import Content and Part types for structured multi-turn/multi-role inputs
14
+ from google.genai.types import Content, Part
15
  from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
16
 
17
 
 
193
  model_to_use = model_name
194
 
195
  try:
196
+ # *** FIX: Use Content objects with Part objects inside the parts list ***
197
  messages = [
198
+ Content(role="system", parts=[Part(text=system_prompt)]), # System instruction as a Part
199
+ Content(role="user", parts=[Part(text=user_message_content)]) # User state as a Part
200
  ]
201
 
202
  response = client.models.generate_content(
 
210
  # Ensure candidate and content exist before accessing parts
211
  if not response.candidates or not response.candidates[0].content:
212
  print("Agent returned no candidates or empty content.")
213
+ # Check if there was a rejection reason
214
+ if response.prompt_feedback and response.prompt_feedback.block_reason:
215
+ block_reason = response.prompt_feedback.block_reason
216
+ print(f"Prompt was blocked. Reason: {block_reason}")
217
+ return f"ERROR: Agent response blocked by safety filters. Reason: {block_reason.name}" # Return reason if available
218
  return f"ERROR: Agent returned no response content."
219
 
220
+
221
  response_text = "".join([part.text for part in response.candidates[0].content.parts])
222
 
223
  print(f"--- Agent Response --- ({model_to_use})")
 
258
  }
259
  response_text = run_agent(
260
  client=client,
261
+ model_name="gemini-2.5-flash-preview-04-17", # Use the specific model name
262
  system_prompt_template=SYSTEM_ARCHITECT,
263
  user_input_state=input_state_for_planner,
264
  config=config,
 
289
  }
290
  response_text = run_agent(
291
  client=client,
292
+ model_name="gemini-2.5-flash-preview-04-17", # Use the specific model name
293
  system_prompt_template=SYSTEM_CODEGEN,
294
  user_input_state=input_state_for_codegen,
295
  config=config,
 
381
  }
382
  response_text = run_agent(
383
  client=client,
384
+ model_name="gemini-2.5-flash-preview-04-17", # Use the specific model name
385
  system_prompt_template=SYSTEM_DEBUG,
386
  user_input_state=input_state_for_debugger,
387
  config=config,
 
441
 
442
  elif current_task.startswith('CODING'):
443
  # Ensure minimum files exist before asking CodeGen to code
444
+ # This happens once at the start of the first coding task or if syntax errors occurred
445
+ if project_state['attempt_count'] == 0 or 'Syntax Errors Found' in project_state.get('feedback', ''):
446
+ # Add initial stub files if they don't exist or need regeneration due to syntax issues
447
+ if project_state['main_app_file'] not in project_state['files'] or project_state.get('needs_initial_stubs', False): # Check flag if set
448
+ print("Adding initial stubs for main app file...")
449
  project_state['files'][project_state['main_app_file']] = f"# Initial {project_state['sdk_choice']} app file\n" # Start with a basic stub
450
  if project_state['sdk_choice'] == 'gradio':
451
  project_state['files'][project_state['main_app_file']] += "import gradio as gr\n\n# Define a simple interface\n# For example: gr.Interface(...).launch()\n"
452
  elif project_state['sdk_choice'] == 'streamlit':
453
  project_state['files'][project_state['main_app_file']] += "import streamlit as st\n\n# Your Streamlit app starts here\n# For example: st.write('Hello, world!')\n"
454
+ project_state['needs_initial_stubs'] = False # Reset flag
455
+
456
 
457
  if 'requirements.txt' not in project_state['files']:
458
+ print("Adding initial requirements.txt stub...")
459
  req_content = "pandas\n" + ("streamlit\n" if project_state['sdk_choice']=="streamlit" else "gradio\n") + "google-generativeai\nhuggingface-hub\n"
460
  project_state['files']['requirements.txt'] = req_content
461
 
462
  if 'README.md' not in project_state['files']:
463
+ print("Adding initial README.md stub...")
464
  readme_content = f"""---
465
  title: {project_state['repo_id']}
466
  emoji: 🐢
 
530
  # Wait a moment for build to start
531
  time.sleep(5) # Initial wait
532
  wait_time = 5
533
+ max_log_wait = 150 # Increased max wait time for logs
534
  elapsed_log_wait = 0
535
  logs_fetched = False
536
  iframe_checked = False