Update app.py
app.py
CHANGED
@@ -10,8 +10,8 @@ from huggingface_hub import (
 )
 from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status
 from google import genai
-# Import Content
-
+# Import Content type for structured multi-turn/multi-role inputs
+from google.genai.types import Content
 from google.genai.types import Tool, GenerateContentConfig, GoogleSearch
 
 
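For orientation, here is a minimal, self-contained sketch of how the newly imported Content type is typically used with the google-genai client. The model name is the one this commit pins; the explicit Part wrapper is an assumption about the installed SDK version (recent releases also accept plain strings in contents):

from google import genai
from google.genai.types import Content, Part

client = genai.Client()  # assumes GOOGLE_API_KEY is set in the environment

# One explicit user turn; the SDK normally also accepts a bare string here.
turn = Content(role="user", parts=[Part.from_text(text="Say hello.")])

response = client.models.generate_content(
    model="gemini-2.5-flash-preview-04-17",  # model pinned by this commit
    contents=[turn],
)
print(response.text)  # convenience accessor over candidates[0].content.parts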
@@ -189,23 +189,29 @@ def run_agent(client, model_name, system_prompt_template, user_input_state, conf
     # Prepare the message content by formatting the project state
     user_message_content = "Project State:\n" + json.dumps(user_input_state, indent=2)
 
-    #
-
+    # Define the model to use based on the parameter
+    model_to_use = model_name
 
     try:
-        #
-
+        # *** FIX: Use Content objects for system and user roles ***
+        messages = [
+            Content(role="system", parts=[system_prompt]),       # System instruction as a part
+            Content(role="user", parts=[user_message_content])   # User state as a part
+        ]
 
-        # Pass a list containing the single combined prompt string
         response = client.models.generate_content(
             model=model_to_use,
-            contents=
+            contents=messages,  # Pass the list of Content objects
            config=config
         )
-        # *** FIX: REMOVED response.raise_for_status() as it doesn't exist on GenerateContentResponse ***
         # API errors are handled by the SDK raising exceptions caught below.
 
         # Some models return parts, concatenate them
+        # Ensure candidate and content exist before accessing parts
+        if not response.candidates or not response.candidates[0].content:
+            print("Agent returned no candidates or empty content.")
+            return f"ERROR: Agent returned no response content."
+
         response_text = "".join([part.text for part in response.candidates[0].content.parts])
 
         print(f"--- Agent Response --- ({model_to_use})")
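One caveat on the change above: the Gemini API documents only "user" and "model" as Content roles, so a Content(role="system", ...) turn may be rejected at request time. If that happens, the conventional place for the system prompt is GenerateContentConfig.system_instruction. A hedged sketch, reusing run_agent's variable names:

from google.genai.types import GenerateContentConfig

# Sketch only: carry the system prompt in the config instead of a
# "system"-role turn. `system_prompt`, `model_to_use`, and
# `user_message_content` are the names used inside run_agent above.
config = GenerateContentConfig(system_instruction=system_prompt)

response = client.models.generate_content(
    model=model_to_use,
    contents=user_message_content,  # a plain string works for a single user turn
    config=config,
)

# Defensive extraction, equivalent in spirit to the guard added above:
response_text = response.text or ""  # .text is None when no parts come back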
@@ -246,7 +252,7 @@ def run_planner(client, project_state, config):
     }
     response_text = run_agent(
         client=client,
-        model_name="gemini-
+        model_name="gemini-2.5-flash-preview-04-17",  # *** FIX: Use the specific model name ***
         system_prompt_template=SYSTEM_ARCHITECT,
         user_input_state=input_state_for_planner,
         config=config,
@@ -277,7 +283,7 @@ def run_codegen(client, project_state, config):
     }
     response_text = run_agent(
         client=client,
-        model_name="gemini-
+        model_name="gemini-2.5-flash-preview-04-17",  # *** FIX: Use the specific model name ***
         system_prompt_template=SYSTEM_CODEGEN,
         user_input_state=input_state_for_codegen,
         config=config,
@@ -369,7 +375,7 @@ def run_debugger(client, project_state, config):
     }
     response_text = run_agent(
         client=client,
-        model_name="gemini-
+        model_name="gemini-2.5-flash-preview-04-17",  # *** FIX: Use the specific model name ***
         system_prompt_template=SYSTEM_DEBUG,
         user_input_state=input_state_for_debugger,
         config=config,
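The same model string now appears verbatim in run_planner, run_codegen, and run_debugger. A small follow-up (not part of this commit) would be a module-level constant so the three call sites stay in sync:

# Single source of truth for the agent model; change once when the
# preview model is renamed or retired.
GEMINI_MODEL = "gemini-2.5-flash-preview-04-17"

response_text = run_agent(
    client=client,
    model_name=GEMINI_MODEL,  # instead of repeating the literal per agent
    system_prompt_template=SYSTEM_ARCHITECT,
    user_input_state=input_state_for_planner,
    config=config,
)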
@@ -859,7 +865,6 @@ with gr.Blocks(title="HF Space Auto‑Builder (Team AI)") as demo:
         outputs=[chatbot, build_box, run_box, preview, project_status_md]
     )
 
-    # Update the submit handler - inputs match handle_user_message signature
     user_in.submit(
         fn=handle_user_message,
         inputs=[