Update app.py
app.py
CHANGED
```diff
@@ -109,12 +109,12 @@ try:
             genai.types.FunctionDeclaration(
                 name='load_page',
                 description='Fetches the content of a specific web page URL as Markdown text. Use this when the user asks for information from a specific URL they provide, or when you need to look up live information mentioned alongside a specific source URL.',
-                # --- CORRECTED PARAMETERS STRUCTURE ---
-                parameters={
-                    'type':
+                # --- CORRECTED PARAMETERS STRUCTURE WITH STRING TYPES ---
+                parameters={
+                    'type': 'object',  # Use string 'object'
                     'properties': {
-                        'url': {
-                            'type':
+                        'url': {
+                            'type': 'string',  # Use string 'string'
                             'description': "The *full* URL of the webpage to load (must start with http:// or https://)."
                         }
                     },
@@ -124,7 +124,7 @@ try:
             )
         ]
     )
-    #
+    # Code execution tool definition remains the same
     code_execution_tool = genai.types.Tool(code_execution=genai.types.ToolCodeExecution())
 
     tools = [browse_tool, code_execution_tool]
```
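The substantive fix in the first hunk is the schema dialect: Gemini function declarations take an OpenAPI-style JSON schema, so `type` should be the lowercase strings `'object'` and `'string'` rather than enum constants. For context, here is a minimal sketch of a handler that a `load_page` declaration like this could dispatch to — hypothetical code, not part of app.py; `markdownify` stands in for whatever HTML-to-Markdown conversion the app actually uses:

```python
import requests
from markdownify import markdownify  # assumption: any HTML-to-Markdown helper works here

def load_page(url: str) -> dict:
    """Fetch a page and return its content as Markdown, mirroring the declared schema."""
    if not url.startswith(("http://", "https://")):
        # The schema's description insists on a full URL for exactly this reason.
        return {"error": "URL must start with http:// or https://"}
    resp = requests.get(url, timeout=15)
    resp.raise_for_status()
    return {"content": markdownify(resp.text)}
```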
```diff
@@ -252,9 +252,6 @@ def generate_response_with_tools(user_input, history_state):
             # This ensures the model's thought process (including asking for tools) is recorded
             if candidate.content:
                 current_history_for_api.append(candidate.content)
-            # elif finish_reason != genai.types.Candidate.FinishReason.STOP: # Handle cases where content might be missing but expected
-            #     print(f"Warning: Candidate content is missing, finish reason: {finish_reason.name}")
-
 
             # Check for non-stop, non-tool reasons first
             if finish_reason not in (genai.types.Candidate.FinishReason.STOP, genai.types.Candidate.FinishReason.TOOL_CALL):
@@ -275,8 +272,6 @@ def generate_response_with_tools(user_input, history_state):
                 if not candidate.content or not candidate.content.parts:
                     print("Error: TOOL_CALL indicated but candidate content/parts is missing.")
                     final_bot_message = "[Model indicated tool use but provided no details.]"
-                    # Append error message as model turn?
-                    # current_history_for_api.append(genai.types.Content(role="model", parts=[genai.types.Part.from_text(final_bot_message)]))
                     break
 
                 # Extract valid function calls
@@ -288,7 +283,6 @@ def generate_response_with_tools(user_input, history_state):
                     final_bot_message = "".join([p.text for p in candidate.content.parts if hasattr(p, 'text') and p.text])
                     if not final_bot_message:
                         final_bot_message = "[Model indicated tool use but provided no callable function or text.]"
-                    # Model turn (with text if any) is already in history.
                     break
 
                 # Execute tools and collect responses
@@ -300,7 +294,6 @@ def generate_response_with_tools(user_input, history_state):
                 if not tool_responses:
                     print("Warning: No valid tool responses generated despite TOOL_CALL.")
                     final_bot_message = "[Failed to process tool call request.]"
-                    # Decide if to append this message. History has model asking for tool.
                     break
 
                 # Add the tool execution results to history for the next API call
```
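When a tool call succeeds, its result has to go back to the model as a fresh turn in the history. A minimal sketch of what each entry in `tool_responses` plausibly looks like, assuming the `genai.types` helpers the file already uses (`Content`, `Part`); `from_function_response` is an assumption about this SDK version, not confirmed by the diff:

```python
def make_tool_turn(name: str, result: dict):
    """Wrap one tool result as the 'tool'-role turn appended to the API history."""
    # Assumption: this SDK exposes Part.from_function_response(); other versions
    # build a Part(function_response=FunctionResponse(...)) by hand instead.
    part = genai.types.Part.from_function_response(name=name, response=result)
    return genai.types.Content(role="tool", parts=[part])
```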
```diff
@@ -312,9 +305,9 @@ def generate_response_with_tools(user_input, history_state):
             else: # FinishReason == STOP
                 print("No tool call requested. Final response received.")
                 # Extract final text and any code suggestions/results from the *last* model turn
-                # (which we added to history before the check)
                 final_bot_message = ""
                 code_parts_display = []
+                # Ensure we look at the last content which should be the final model response
                 if current_history_for_api and current_history_for_api[-1].role == "model":
                     last_model_content = current_history_for_api[-1]
                     if last_model_content.parts:
@@ -326,11 +319,11 @@ def generate_response_with_tools(user_input, history_state):
                                 code = getattr(part.executable_code, 'code', '') # Safe access
                                 code_parts_display.append(f"Suggested Code ({lang}):\n```{'python' if lang == 'unknown_language' else lang}\n{code}\n```")
                             elif hasattr(part, 'code_execution_result') and part.code_execution_result:
-
-
-
+                                # Safer access to outcome comparison value
+                                outcome_enum = getattr(genai.types, 'ExecutableCodeResponse', None)
+                                outcome_ok_val = getattr(outcome_enum.Outcome, 'OK', None) if outcome_enum and hasattr(outcome_enum, 'Outcome') else 1 # Default fallback
                                 outcome_val = getattr(part.code_execution_result, 'outcome', None)
-                                outcome_str = "Success" if outcome_val ==
+                                outcome_str = "Success" if outcome_val == outcome_ok_val else "Failure"
                                 output = getattr(part.code_execution_result, 'output', '')
                                 code_parts_display.append(f"Code Execution Result ({outcome_str}):\n```\n{output}\n```")
 
```
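The `getattr` chain added above exists because the location of the code-execution `Outcome` enum differs across SDK versions. A version-agnostic alternative (a sketch, not the app's code) is to compare by enum member name rather than by identity:

```python
def outcome_label(outcome) -> str:
    """Map an execution outcome to the Success/Failure label used in the display."""
    name = getattr(outcome, "name", str(outcome))  # enum member name, or raw value
    return "Success" if "OK" in name.upper() else "Failure"
```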
```diff
@@ -343,7 +336,6 @@ def generate_response_with_tools(user_input, history_state):
                 if current_history_for_api[-1].role == "model" and not any(hasattr(p,'text') and p.text for p in current_history_for_api[-1].parts):
                     current_history_for_api[-1].parts.append(genai.types.Part.from_text(final_bot_message))
 
-
                 break # Exit the while loop (finish reason was STOP)
 
         # End of while loop
@@ -353,10 +345,10 @@ def generate_response_with_tools(user_input, history_state):
         final_bot_message += warning_msg
         # Append warning to the last model message in history
         if current_history_for_api and current_history_for_api[-1].role == "model":
+            # Check if parts list exists before appending
+            if not hasattr(current_history_for_api[-1], 'parts') or not current_history_for_api[-1].parts:
+                current_history_for_api[-1].parts = [] # Initialize if needed
             current_history_for_api[-1].parts.append(genai.types.Part.from_text(warning_msg))
-        # elif current_history_for_api: # If last turn wasn't model (e.g. tool), add new model msg
-        #     current_history_for_api.append(genai.types.Content(role="model", parts=[genai.types.Part.from_text(warning_msg)]))
-
 
         print("--- Response Generation Complete ---")
 
```
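The warning handled here fires when the tool loop exhausts its turn budget. The shape of that guard, sketched in isolation (`MAX_TOOL_TURNS` and `step` are illustrative names; the real limit presumably lives earlier in app.py):

```python
MAX_TOOL_TURNS = 5  # assumption: stands in for the app's actual turn limit

def run_with_tool_limit(step) -> str:
    """step() returns True once the model finishes (STOP) rather than calling a tool."""
    for _ in range(MAX_TOOL_TURNS):
        if step():
            return ""  # clean finish, no warning needed
    return "\n\n[Warning: Reached maximum tool turns without a final answer.]"
```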
```diff
@@ -367,39 +359,37 @@ def generate_response_with_tools(user_input, history_state):
         if content.role == "system": continue
 
         display_text = ""
-        if content.parts:
+        if hasattr(content, 'parts') and content.parts: # Ensure parts exists
             for part in content.parts:
-                # Simplified text extraction for display - assumes primary output is text
                 if hasattr(part, 'text') and part.text:
                     display_text += part.text + "\n"
-                # Optionally add code block formatting here if needed for display
                 elif hasattr(part, 'executable_code') and part.executable_code:
                     lang = getattr(getattr(part.executable_code, 'language', None), 'name', 'python').lower()
                     code = getattr(part.executable_code, 'code', '')
                     display_text += f"\n```{'python' if lang == 'unknown_language' else lang}\n{code}\n```\n"
                 elif hasattr(part, 'code_execution_result') and part.code_execution_result:
-
+                    # Simplified display formatting
                     outcome_val = getattr(part.code_execution_result, 'outcome', None)
-
+                    outcome_ok_val = getattr(getattr(getattr(genai.types, 'ExecutableCodeResponse', None), 'Outcome', None), 'OK', 1)
+                    outcome_str = "Success" if outcome_val == outcome_ok_val else "Failure"
                     output = getattr(part.code_execution_result, 'output', '')
                     display_text += f"\nCode Execution Result ({outcome_str}):\n```\n{output}\n```\n"
 
         display_text = display_text.strip()
 
-        if not display_text and content.role not in ["tool"]: continue
+        if not display_text and content.role not in ["tool"]: continue
 
         if content.role == "user":
             user_msg_buffer = display_text
         elif content.role == "model":
             if user_msg_buffer is not None:
-                chatbot_display_list.append([user_msg_buffer, display_text or "[Processing...]"])
-                user_msg_buffer = None
+                chatbot_display_list.append([user_msg_buffer, display_text or "[Processing...]"])
+                user_msg_buffer = None
             else:
-                # Consecutive model messages or initial message
                 chatbot_display_list.append([None, display_text])
         # Ignore 'tool' role messages for chat display
 
-        # If loop ended with user msg pending
+    # If loop ended with user msg pending
     if user_msg_buffer is not None:
         chatbot_display_list.append([user_msg_buffer, None])
 
```
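This display loop folds the role-tagged API history into Gradio's 'tuples' rows. Its pairing rule, distilled into a standalone sketch with illustrative names:

```python
def to_tuples(turns):
    """Pair (role, text) turns into [user, bot] rows; 'tool' turns are skipped."""
    rows, pending_user = [], None
    for role, text in turns:
        if role == "user":
            pending_user = text
        elif role == "model":
            rows.append([pending_user, text])  # [None, text] for consecutive model turns
            pending_user = None
    if pending_user is not None:
        rows.append([pending_user, None])  # user message still awaiting a reply
    return rows

assert to_tuples([("user", "hi"), ("model", "hello"), ("tool", "...")]) == [["hi", "hello"]]
```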
```diff
@@ -416,7 +406,9 @@ def generate_response_with_tools(user_input, history_state):
         temp_user_msg = None
         for content in history_state: # Use the state *before* the error
             if content.role == "system": continue
-            text = ""
+            text = ""
+            if hasattr(content, 'parts') and content.parts: # Check parts exists
+                text = "".join([p.text for p in content.parts if hasattr(p, 'text')])
             if content.role == "user": temp_user_msg = text
             elif content.role == "model" and temp_user_msg:
                 error_display_list.append([temp_user_msg, text])
@@ -427,7 +419,9 @@ def generate_response_with_tools(user_input, history_state):
         error_display_list.append([None, error_message]) # Add error message bubble
 
         # Return the history state *before* the failed turn started
-
+        # Make sure conversation_history exists and is a list before slicing
+        previous_history = conversation_history[:-1] if isinstance(conversation_history, list) and conversation_history else []
+        return error_display_list, previous_history
 
 
 # --- Gradio Interface ---
```
```diff
@@ -436,17 +430,13 @@ with gr.Blocks(title="Gemini AI Assistant w/ Tools", theme=gr.themes.Soft()) as
     gr.Markdown(f"# 🚀 Gemini AI Assistant ({MODEL_NAME})")
     gr.Markdown("Ask questions, request info from specific URLs, or ask for code/calculations. Uses function calling and code execution.")
 
-    #
-    # While 'messages' format is preferred long-term, 'tuples' still works.
-    # Changing would require refactoring the bot_response_update function's return value.
-    # For now, we acknowledge the warning but keep the 'tuples' format as implemented.
+    # Acknowledge Gradio warnings but keep tuple format for now
     chatbot_display = gr.Chatbot(
         label="Conversation",
-        bubble_full_width=False,
+        bubble_full_width=False, # Keep param even if deprecated, avoids needing older Gradio
         height=600,
         show_copy_button=True,
         render_markdown=True,
-        # type='messages' # Would require backend functions to return list-of-dicts
     )
 
     with gr.Row():
```
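The removed comments explain why the deprecated 'tuples' format stays: switching `gr.Chatbot` to `type='messages'` would ripple into `bot_response_update`'s return value. The conversion itself is small; a sketch of what that refactor would entail:

```python
def tuples_to_messages(rows):
    """Convert [[user, bot], ...] rows into Gradio's openai-style messages format."""
    messages = []
    for user, bot in rows:
        if user is not None:
            messages.append({"role": "user", "content": user})
        if bot is not None:
            messages.append({"role": "assistant", "content": bot})
    return messages
```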
```diff
@@ -458,8 +448,7 @@ with gr.Blocks(title="Gemini AI Assistant w/ Tools", theme=gr.themes.Soft()) as
         )
         with gr.Column(scale=1, min_width=150):
             send_btn = gr.Button("➡️ Send", variant="primary")
-
-            clear_btn = gr.ClearButton(value="🗑️ Clear Chat")
+            clear_btn = gr.ClearButton(value="🗑️ Clear Chat") # Will use custom function below
 
     chat_history_state = gr.State([])
 
@@ -472,9 +461,9 @@ with gr.Blocks(title="Gemini AI Assistant w/ Tools", theme=gr.themes.Soft()) as
 
     def bot_response_update(history_display_list, history_state):
         """Calls the backend Gemini function which returns updated display list and state."""
-        if not history_display_list or history_display_list[-1][1] is not None:
+        if not history_display_list or (len(history_display_list[-1]) > 1 and history_display_list[-1][1] is not None):
             print("Bot update called without pending user message in display list.")
-            return history_display_list, history_state
+            return history_display_list, history_state
 
         user_message = history_display_list[-1][0]
         print(f"User message being sent to backend: {user_message}")
```
```diff
@@ -488,12 +477,12 @@ with gr.Blocks(title="Gemini AI Assistant w/ Tools", theme=gr.themes.Soft()) as
     msg_input.submit(
         user_message_update,
         [msg_input, chatbot_display],
-        [msg_input, chatbot_display],
-        queue=False,
+        [msg_input, chatbot_display],
+        queue=False,
     ).then(
         bot_response_update,
-        [chatbot_display, chat_history_state],
-        [chatbot_display, chat_history_state]
+        [chatbot_display, chat_history_state],
+        [chatbot_display, chat_history_state]
     )
 
     send_btn.click(
```
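This two-step chain echoes the user's message instantly (`queue=False`) before the slow model call runs via `.then()`. `user_message_update` itself is outside this diff; from how `bot_response_update` reads the last row (`[-1][0]` for the text, `[-1][1] is None` as the pending marker), it plausibly looks like this hypothetical reconstruction:

```python
def user_message_update(user_text, chat_rows):
    """Echo the user's text as a pending [text, None] row and clear the input box."""
    if not user_text or not user_text.strip():
        return "", chat_rows  # nothing to send; leave the display untouched
    return "", chat_rows + [[user_text, None]]
```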
```diff
@@ -509,13 +498,15 @@ with gr.Blocks(title="Gemini AI Assistant w/ Tools", theme=gr.themes.Soft()) as
 
     # Custom clear function to reset state as well
     def clear_all():
-
+        # Return values correspond to outputs=[msg_input, chatbot_display, chat_history_state]
+        return ["", None, []]
 
+    # Wire clear button to the custom function
    clear_btn.click(clear_all, [], [msg_input, chatbot_display, chat_history_state], queue=False)
 
 
 if __name__ == "__main__":
     print("Starting Gradio App...")
     # Ensure queue is enabled for potentially long-running backend calls
-    demo.queue().launch(server_name="0.0.0.0", server_port=7860)
+    demo.queue().launch(server_name="0.0.0.0", server_port=7860, show_error=True) # Add show_error=True for debugging
     print("Gradio App Stopped.")
```