File size: 25,451 Bytes
0617856
3fd0067
d375a16
0617856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
baface3
0617856
 
 
 
3fd0067
0617856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5fced44
0617856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5fced44
0617856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5fced44
9b324d1
0617856
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bdfd7a5
5f3d5cb
0617856
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
# -*- coding: utf-8 -*-
import os
import gradio as gr
# Import the main module and use an alias
import google.generativeai as genai
# Core types like Content/Part are accessed directly via 'genai'
# Other types like Tool, FunctionDeclaration are under genai.types

import requests
import markdownify
from urllib.robotparser import RobotFileParser
from urllib.parse import urlparse
import traceback
import json # Useful for debugging args

# --- Browser/Web Tool Functions ---

def can_crawl_url(url: str, user_agent: str = "PythonGoogleGenAIAgent/1.0") -> bool:
    """Return True if robots.txt allows *user_agent* to fetch *url*.

    Fails closed: an empty/malformed URL, an unreachable robots.txt, or any
    parser error all yield False.
    """
    if not url:
        print("No URL provided to can_crawl_url")
        return False
    try:
        parts = urlparse(url)
        # Both a scheme and a host are needed to locate the site's robots.txt.
        if not (parts.scheme and parts.netloc):
            print(f"Invalid URL format for robots.txt check: {url}")
            return False
        robots_url = f"{parts.scheme}://{parts.netloc}/robots.txt"
        print(f"Checking robots.txt at: {robots_url} for URL: {url}")
        parser = RobotFileParser()
        parser.set_url(robots_url)
        parser.read()
        allowed = parser.can_fetch(user_agent, url)
        print(f"Can fetch {url} with agent '{user_agent}': {allowed}")
        return allowed
    except Exception as e:
        print(f"Error checking robots.txt for {url}: {e}")
        return False

def load_page(url: str) -> str:
    """
    Fetch a single web page and return its contents converted to Markdown.

    Backs the 'load_page' Gemini function declaration: the return value is
    always a plain string, so failures come back as "Error: ..." messages
    rather than raised exceptions.

    Args:
        url: Full URL of the page (must start with http:// or https://).
    Returns:
        Markdown content of the page or an error message string.
    """
    print(f"Attempting to load page: {url}")

    # Guard clauses: reject empty / non-http(s) URLs before any network work.
    if not url:
        return "Error: No URL provided."
    if not url.startswith(('http://', 'https://')):
        return f"Error: Invalid URL scheme. Please provide http or https URL. Got: {url}"

    USER_AGENT = "PythonGoogleGenAIAgent/1.0 (Function Calling)"
    # Honor robots.txt before fetching the page itself.
    if not can_crawl_url(url, user_agent=USER_AGENT):
        print(f"URL {url} failed robots.txt check for agent {USER_AGENT}")
        return f"Error: Access denied by robots.txt for URL {url}"

    try:
        resp = requests.get(url, timeout=15, headers={'User-Agent': USER_AGENT}, allow_redirects=True)
        resp.raise_for_status()

        content_type = resp.headers.get('content-type', '').lower()
        if 'html' not in content_type:
            # Non-HTML payloads (PDF, images, ...) are summarized, not converted.
            print(f"Non-HTML content type '{content_type}' at {url}. Returning summary.")
            return f"Content at {url} is of type '{content_type}'. Size: {len(resp.content)} bytes. Cannot convert to Markdown."

        MAX_CONTENT_SIZE = 1_000_000
        raw = resp.content
        if len(raw) > MAX_CONTENT_SIZE:
            # Oversized page: decode only the first MAX_CONTENT_SIZE bytes.
            print(f"Content size {len(raw)} exceeds limit {MAX_CONTENT_SIZE}. Truncating.")
            try:
                html = raw[:MAX_CONTENT_SIZE].decode(resp.apparent_encoding or 'utf-8', errors='ignore')
            except Exception as decode_err:
                print(f"Decoding error after truncation: {decode_err}. Falling back to utf-8 ignore.")
                html = raw[:MAX_CONTENT_SIZE].decode('utf-8', errors='ignore')
            suffix = "\n\n[Content truncated due to size limit]"
        else:
            html = resp.text
            suffix = ""

        md = markdownify.markdownify(html, heading_style="ATX", strip=['script', 'style'], escape_underscores=False)
        # Compact the output: drop blank lines and per-line edge whitespace.
        md = '\n'.join(line.strip() for line in md.splitlines() if line.strip())
        print(f"Successfully loaded and converted {url} to markdown.")
        return f"Content from {url}:\n\n" + md + suffix

    except requests.exceptions.Timeout:
        print(f"Timeout error loading page: {url}")
        return f"Error: Timeout while trying to load {url}"
    except requests.exceptions.RequestException as e:
        print(f"Request error loading page {url}: {str(e)}")
        return f"Error loading page {url}: {str(e)}"
    except Exception as e:
        print(f"General error loading page {url}: {str(e)}")
        traceback.print_exc()
        return f"Error loading page {url}: An unexpected error occurred ({type(e).__name__})."


# --- Gemini Client Initialization and Configuration ---

# Fix: MODEL_NAME is defined *before* the try block. Previously it was only
# assigned inside the try, so a failure before that point (e.g. missing
# GEMINI_API_KEY) left MODEL_NAME undefined and the Gradio UI header
# (f"... ({MODEL_NAME})") raised NameError at startup.
MODEL_NAME = "gemini-2.5-pro-exp-03-25"

try:
    api_key = os.environ.get("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("GEMINI_API_KEY environment variable not set.")
    genai.configure(api_key=api_key)

    print(f"Attempting to use EXPERIMENTAL model: {MODEL_NAME}")

    # Declare the 'load_page' browsing tool so the model can request page fetches.
    browse_tool = genai.types.Tool(
        function_declarations=[
            genai.types.FunctionDeclaration(
                name='load_page',
                description='Fetches the content of a specific web page URL as Markdown text. Use this when the user asks for information from a specific URL they provide, or when you need to look up live information mentioned alongside a specific source URL.',
                parameters={
                    'type': 'object',
                    'properties': {
                        'url': {
                             'type': 'string',
                             'description': "The *full* URL of the webpage to load (must start with http:// or https://)."
                        }
                    },
                    'required': ['url']
                }
            )
        ]
    )
    # An empty dict enables the SDK's built-in code-execution tool.
    code_execution_tool = genai.types.Tool(code_execution={})

    tools = [browse_tool, code_execution_tool]

    model = genai.GenerativeModel(
        model_name=MODEL_NAME,
        tools=tools,
        # Block medium-and-above harmful content across all four categories.
        safety_settings={
             genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
             genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
             genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
             genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        },
        system_instruction="You are a helpful AI assistant called Gemini-Toolkit. You can browse specific web pages provided by the user via the 'load_page' tool. You can also execute Python code using the 'code_execution' tool to perform calculations, analyze data, or demonstrate programming concepts. Explain your reasoning and the steps you take. If asked to browse, confirm the URL you are accessing. If providing code, explain what it does.",
    )
    print(f"Gemini client initialized with model: {MODEL_NAME} and tools.")

except Exception as e:
    # Fail soft: the app still launches and reports the problem in the chat UI
    # (generate_response_with_tools checks for model is None).
    print(f"CRITICAL ERROR: Error initializing Gemini client: {e}")
    traceback.print_exc()
    model = None
    tools = []


# --- Gradio App Logic ---

def handle_function_call(function_call):
    """Execute the tool call requested by the model and wrap the result.

    Args:
        function_call: Model-emitted function call object (has .name / .args).

    Returns:
        A genai.Part wrapping a FunctionResponse whose payload is either
        {'content': ...} on success or {'error': ...} on execution failure.
    """
    function_name = function_call.name
    args = function_call.args
    print(f"Executing Function Call: {function_name} with args: {dict(args)}")

    try:
        if function_name != 'load_page':
            # The model asked for a tool that was never declared.
            print(f"Error: Received call for unknown function '{function_name}'")
            payload = f"Error: Unknown function '{function_name}' called by the model."
        else:
            url = args.get('url')
            if not url:
                payload = "Error: URL parameter was missing in the function call. Please ensure the 'url' argument is provided."
            else:
                payload = load_page(url=url)
                # Keep the tool output small enough for the model's context.
                MAX_RESPONSE_LEN = 50000
                if len(payload) > MAX_RESPONSE_LEN:
                    print(f"Tool Response truncated from {len(payload)} to {MAX_RESPONSE_LEN} chars.")
                    payload = payload[:MAX_RESPONSE_LEN] + "\n\n[... Tool Response Truncated Due to Size Limit ...]"

        result_part = genai.Part(
            function_response=genai.types.FunctionResponse(
                name=function_name,
                response={'content': payload}
            )
        )
        print(f"Function Response generated for {function_name}")
        return result_part

    except Exception as e:
        print(f"Error during execution of function '{function_name}': {e}")
        traceback.print_exc()
        # Report the failure back to the model instead of raising.
        return genai.Part(
            function_response=genai.types.FunctionResponse(
                name=function_name,
                response={'error': f"Failed to execute function {function_name}: {str(e)}"}
            )
        )

def generate_response_with_tools(user_input, history_state):
    """Run one user turn against Gemini, resolving any tool calls, and return
    the updated Gradio display list plus the new backend history.

    Args:
        user_input: Raw text the user typed.
        history_state: Prior conversation as a list of genai.Content objects;
            any non-list value is treated as an empty history.

    Returns:
        Tuple of (chatbot display list of [user, bot] pairs, history list).
        On exception the display gains an error row and the returned history
        is rolled back to the state before this turn.
    """
    if not model:
         return [[None, "Error: The AI model (Gemini) could not be initialized. Please check the logs or API key configuration."]], history_state or []

    if not user_input.strip():
        return [[None, "Please enter a valid query."]], history_state or []

    # --- History Management ---
    conversation_history = history_state if isinstance(history_state, list) else []
    # NOTE(review): genai.Content / genai.Part are SDK-version sensitive —
    # newer google-generativeai releases expose these under genai.protos /
    # genai.types; confirm against the pinned SDK version.
    conversation_history.append(genai.Content(role="user", parts=[genai.Part.from_text(user_input)]))
    print(f"\n--- Sending to Gemini (History length: {len(conversation_history)}) ---")

    # Bound prompt size: keep at most MAX_HISTORY_TURNS user/model exchanges,
    # plus a leading "system" entry if one exists.
    MAX_HISTORY_TURNS = 10
    max_history_items = MAX_HISTORY_TURNS * 2 + (1 if conversation_history and conversation_history[0].role == "system" else 0)
    if len(conversation_history) > max_history_items:
        print(f"Trimming conversation history from {len(conversation_history)} items to ~{max_history_items}")
        if conversation_history[0].role == "system":
             # Preserve the system entry, drop the oldest turns after it.
             conversation_history = [conversation_history[0]] + conversation_history[-(max_history_items-1):]
        else:
             conversation_history = conversation_history[-max_history_items:]

    # --- Interaction Loop ---
    # Each iteration is one generate_content call; tool calls feed their
    # results back into the history and continue, a STOP breaks out.
    MAX_TOOL_LOOPS = 5
    loop_count = 0
    current_history_for_api = list(conversation_history)
    final_bot_message = ""

    try:
        while loop_count < MAX_TOOL_LOOPS:
            loop_count += 1
            print(f"Generation loop {loop_count}/{MAX_TOOL_LOOPS}...")

            response = model.generate_content(
                current_history_for_api,
                request_options={"timeout": 120},
            )

            if not response.candidates:
                 print("Warning: No candidates received from Gemini.")
                 final_bot_message = "[No response generated by the model.]"
                 # Record the placeholder in history so display stays in sync.
                 current_history_for_api.append(genai.Content(role="model", parts=[genai.Part.from_text(final_bot_message)]))
                 break

            # Only the first candidate is considered.
            candidate = response.candidates[0]
            finish_reason = candidate.finish_reason

            # Append model's response (Content object) to history.
            if candidate.content:
                current_history_for_api.append(candidate.content) # content should already be a genai.Content object

            # Anything other than STOP/TOOL_CALL (safety block, token limit, ...)
            # ends the turn with whatever partial text was produced.
            # NOTE(review): FinishReason.TOOL_CALL may not exist in all SDK
            # versions (some signal tool use only via function_call parts) — confirm.
            if finish_reason not in (genai.types.Candidate.FinishReason.STOP, genai.types.Candidate.FinishReason.TOOL_CALL):
                 print(f"Warning: Generation stopped unexpectedly. Reason: {finish_reason.name}")
                 stop_reason_msg = f"[Model stopped generating. Reason: {finish_reason.name}]"
                 partial_text = ""
                 if candidate.content and candidate.content.parts:
                     partial_text = "".join([p.text for p in candidate.content.parts if hasattr(p, 'text') and p.text])
                 final_bot_message = (partial_text + "\n" if partial_text else "") + stop_reason_msg
                 break

            has_tool_call = finish_reason == genai.types.Candidate.FinishReason.TOOL_CALL

            if has_tool_call:
                print("Tool call requested by model.")
                # Model response (genai.Content obj) is already appended above.
                if not candidate.content or not candidate.content.parts:
                     print("Error: TOOL_CALL indicated but candidate content/parts is missing.")
                     final_bot_message = "[Model indicated tool use but provided no details.]"
                     break

                function_calls = [part.function_call for part in candidate.content.parts if hasattr(part, 'function_call') and part.function_call]

                if not function_calls:
                     # Fall back to whatever text the model produced instead.
                     print("Warning: TOOL_CALL finish reason but no valid function_call part found.")
                     final_bot_message = "".join([p.text for p in candidate.content.parts if hasattr(p, 'text') and p.text])
                     if not final_bot_message:
                          final_bot_message = "[Model indicated tool use but provided no callable function or text.]"
                     break

                # Execute every requested call; each returns a genai.Part.
                tool_responses = []
                for func_call in function_calls:
                     function_response_part = handle_function_call(func_call)
                     tool_responses.append(function_response_part)

                if not tool_responses:
                     print("Warning: No valid tool responses generated despite TOOL_CALL.")
                     final_bot_message = "[Failed to process tool call request.]"
                     break

                # Feed the tool output back as a "tool" turn and loop again.
                current_history_for_api.append(genai.Content(role="tool", parts=tool_responses)) # parts expects list of genai.Part
                print("Added tool response(s) to history. Continuing loop...")
                final_bot_message = ""
                continue

            else: # FinishReason == STOP
                print("No tool call requested. Final response received.")
                final_bot_message = ""
                code_parts_display = []
                # Extract text/code from the last model turn (genai.Content object) in history.
                if current_history_for_api and current_history_for_api[-1].role == "model":
                    last_model_content = current_history_for_api[-1]
                    if last_model_content.parts:
                        for part in last_model_content.parts: # part is already a genai.Part object
                            if hasattr(part, 'text') and part.text:
                                final_bot_message += part.text
                            if hasattr(part, 'executable_code') and part.executable_code:
                                # Render model-suggested code as a fenced block.
                                lang = getattr(getattr(part.executable_code, 'language', None), 'name', 'python').lower()
                                code = getattr(part.executable_code, 'code', '')
                                code_parts_display.append(f"Suggested Code ({lang}):\n```{'python' if lang == 'unknown_language' else lang}\n{code}\n```")
                            elif hasattr(part, 'code_execution_result') and part.code_execution_result:
                                # NOTE(review): defensive Outcome lookup; falls back to 1
                                # as the "OK" value — confirm against the SDK's Outcome enum.
                                outcome_enum = getattr(genai.types, 'ExecutableCodeResponse', None)
                                outcome_ok_val = getattr(outcome_enum.Outcome, 'OK', None) if outcome_enum and hasattr(outcome_enum, 'Outcome') else 1
                                outcome_val = getattr(part.code_execution_result, 'outcome', None)
                                outcome_str = "Success" if outcome_val == outcome_ok_val else "Failure"
                                output = getattr(part.code_execution_result, 'output', '')
                                code_parts_display.append(f"Code Execution Result ({outcome_str}):\n```\n{output}\n```")

                if code_parts_display:
                    final_bot_message += "\n\n" + "\n\n".join(code_parts_display)

                if not final_bot_message.strip():
                     final_bot_message = "[Assistant completed its turn without generating text output.]"
                     # Add this as a text Part to the last model Content object if it was otherwise empty.
                     if current_history_for_api[-1].role == "model" and not any(hasattr(p,'text') and p.text for p in current_history_for_api[-1].parts):
                          if not hasattr(current_history_for_api[-1], 'parts') or not current_history_for_api[-1].parts:
                              current_history_for_api[-1].parts = []
                          current_history_for_api[-1].parts.append(genai.Part.from_text(final_bot_message))
                break

        # End of while loop: warn the user if we bailed out due to the loop cap.
        if loop_count >= MAX_TOOL_LOOPS:
             print(f"Warning: Reached maximum tool execution loops ({MAX_TOOL_LOOPS}).")
             warning_msg = f"\n\n[Warning: Reached maximum tool execution loops ({MAX_TOOL_LOOPS}). The final response might be incomplete.]"
             final_bot_message += warning_msg
             # Append warning as a genai.Part to the last model message.
             if current_history_for_api and current_history_for_api[-1].role == "model":
                 if not hasattr(current_history_for_api[-1], 'parts') or not current_history_for_api[-1].parts:
                     current_history_for_api[-1].parts = []
                 current_history_for_api[-1].parts.append(genai.Part.from_text(warning_msg))

        print("--- Response Generation Complete ---")

        # --- Format final output for Gradio Chatbot ---
        # Fold the genai history into [user, bot] display pairs; tool turns are skipped.
        chatbot_display_list = []
        user_msg_buffer = None
        for content in current_history_for_api: # content is genai.Content object
            if content.role == "system": continue

            display_text = ""
            if hasattr(content, 'parts') and content.parts:
                for part in content.parts: # part is genai.Part object
                    # Extract text for display, format code etc.
                    if hasattr(part, 'text') and part.text:
                        display_text += part.text + "\n"
                    elif hasattr(part, 'executable_code') and part.executable_code:
                        lang = getattr(getattr(part.executable_code, 'language', None), 'name', 'python').lower()
                        code = getattr(part.executable_code, 'code', '')
                        display_text += f"\n```{'python' if lang == 'unknown_language' else lang}\n{code}\n```\n"
                    elif hasattr(part, 'code_execution_result') and part.code_execution_result:
                        # Same defensive Outcome lookup as in the STOP branch above.
                        outcome_enum = getattr(genai.types, 'ExecutableCodeResponse', None)
                        outcome_ok_val = getattr(outcome_enum.Outcome, 'OK', None) if outcome_enum and hasattr(outcome_enum, 'Outcome') else 1
                        outcome_val = getattr(part.code_execution_result, 'outcome', None)
                        outcome_str = "Success" if outcome_val == outcome_ok_val else "Failure"
                        output = getattr(part.code_execution_result, 'output', '')
                        display_text += f"\nCode Execution Result ({outcome_str}):\n```\n{output}\n```\n"

            display_text = display_text.strip()

            if not display_text and content.role not in ["tool"]: continue

            if content.role == "user":
                user_msg_buffer = display_text
            elif content.role == "model":
                if user_msg_buffer is not None:
                    chatbot_display_list.append([user_msg_buffer, display_text or "[Processing...]"])
                    user_msg_buffer = None
                else:
                    chatbot_display_list.append([None, display_text])
            # Ignore 'tool' role messages for chat display.

        if user_msg_buffer is not None:
             # Trailing user message without a reply (shouldn't normally happen).
             chatbot_display_list.append([user_msg_buffer, None])

        return chatbot_display_list, current_history_for_api

    except Exception as e:
        print(f"ERROR during Gemini generation or tool processing: {str(e)}")
        traceback.print_exc()
        error_message = f"An error occurred: {str(e)}"
        error_display_list = []
        # Rebuild display from previous state (history_state).
        if isinstance(history_state, list):
            temp_user_msg = None
            for content in history_state: # content is genai.Content
                 if content.role == "system": continue
                 text = ""
                 if hasattr(content, 'parts') and content.parts:
                     # parts contains genai.Part objects
                     text = "".join([p.text for p in content.parts if hasattr(p, 'text')])
                 if content.role == "user": temp_user_msg = text
                 elif content.role == "model" and temp_user_msg:
                      error_display_list.append([temp_user_msg, text])
                      temp_user_msg = None
                 elif content.role == "model": error_display_list.append([None, text])
            if temp_user_msg: error_display_list.append([temp_user_msg, None])

        error_display_list.append([None, error_message])

        # Revert to history *before* this failed turn (drops the user message
        # appended at the top of this function).
        previous_history = conversation_history[:-1] if isinstance(conversation_history, list) and conversation_history else []
        return error_display_list, previous_history


# --- Gradio Interface ---

with gr.Blocks(title="Gemini AI Assistant w/ Tools", theme=gr.themes.Soft()) as demo:
    # NOTE(review): the emoji literals below look mojibake (UTF-8 bytes decoded
    # as Latin-1/cp1252) — confirm the intended glyphs before changing them.
    gr.Markdown(f"# πŸš€ Gemini AI Assistant ({MODEL_NAME})")
    gr.Markdown("Ask questions, request info from specific URLs, or ask for code/calculations. Uses function calling and code execution.")

    chatbot_display = gr.Chatbot(
        label="Conversation",
        bubble_full_width=False, # Keep param even if deprecated
        height=600,
        show_copy_button=True,
        render_markdown=True,
    )

    with gr.Row():
        msg_input = gr.Textbox(
            label="Your Query",
            placeholder="Ask anything...",
            lines=3,
            scale=4
        )
        with gr.Column(scale=1, min_width=150):
            send_btn = gr.Button("➑️ Send", variant="primary")
            clear_btn = gr.ClearButton(value="πŸ—‘οΈ Clear Chat")

    # Backend conversation state: a list of genai.Content objects, kept
    # separately from the Chatbot's display list of [user, bot] pairs.
    chat_history_state = gr.State([])

    def user_message_update(user_message, history_display_list):
        """Append the pending user message to the display list and clear the input.

        Returns (new textbox value, new display list); the bot reply slot is
        left as None for bot_response_update to fill in.
        """
        if not user_message.strip():
             return gr.update(value=""), history_display_list
        return gr.update(value=""), history_display_list + [[user_message, None]]

    def bot_response_update(history_display_list, history_state):
        """Send the last pending user message to the backend and refresh display/state."""
        # Guard: only proceed when the last display row is a user message awaiting a reply.
        if not history_display_list or (len(history_display_list[-1]) > 1 and history_display_list[-1][1] is not None):
            print("Bot update called without pending user message in display list.")
            return history_display_list, history_state

        user_message = history_display_list[-1][0]
        print(f"User message being sent to backend: {user_message}")

        updated_display_list, updated_history_state = generate_response_with_tools(user_message, history_state)

        return updated_display_list, updated_history_state

    # --- Event Listeners ---
    # Two-step chain: echo the user message immediately (queue=False), then
    # run the slow model call and update both the display and backend state.
    msg_input.submit(
        user_message_update,
        [msg_input, chatbot_display],
        [msg_input, chatbot_display],
        queue=False,
    ).then(
        bot_response_update,
        [chatbot_display, chat_history_state], # Pass display list and history state
        [chatbot_display, chat_history_state]  # Receive updated display list and state
    )

    # Same two-step chain for the Send button.
    send_btn.click(
        user_message_update,
        [msg_input, chatbot_display],
        [msg_input, chatbot_display],
        queue=False,
    ).then(
        bot_response_update,
        [chatbot_display, chat_history_state],
        [chatbot_display, chat_history_state]
    )

    # Custom clear function resets textbox, chat display, and backend state together.
    def clear_all():
         return ["", None, []]

    clear_btn.click(clear_all, [], [msg_input, chatbot_display, chat_history_state], queue=False)


if __name__ == "__main__":
    print("Starting Gradio App...")
    # queue() enables request queuing for the long-running model/tool calls;
    # launch() blocks until shutdown, listening on all interfaces, port 7860.
    demo.queue().launch(server_name="0.0.0.0", server_port=7860, show_error=True)
    print("Gradio App Stopped.")