Update app.py
app.py CHANGED
@@ -22,7 +22,7 @@ GEMINI_MODELS = {
    "gemini-1.5-flash-8b": ("Gemini 1.5 Flash 8B", "High volume and lower intelligence tasks."),
    "gemini-2.0-flash": ("Gemini 2.0 Flash", "Next generation features, speed, thinking, realtime streaming, and multimodal generation."),
    "gemini-2.0-flash-lite": ("Gemini 2.0 Flash-Lite", "Cost efficiency and low latency."),
-   # Note: Preview models might have shorter lifespans or different capabilities
+   # Note: Preview models might have shorter lifespans or different capabilities. Uncomment if you want to include them.
    # "gemini-2.5-flash-preview-04-17": ("Gemini 2.5 Flash Preview (04-17)", "Adaptive thinking, cost efficiency."),
    # "gemini-2.5-pro-preview-03-25": ("Gemini 2.5 Pro Preview (03-25)", "Enhanced thinking and reasoning, multimodal understanding, advanced coding, and more."),
}
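For context (not part of this commit): each registry entry maps a model id to a (display name, description) tuple, and the helper functions changed below read those fields by index. A minimal illustration, assuming GEMINI_MODELS as defined above:

display_name, description = GEMINI_MODELS["gemini-2.0-flash"]
print(display_name)  # "Gemini 2.0 Flash"
print(description)   # "Next generation features, speed, thinking, realtime streaming, and multimodal generation."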
@@ -159,22 +159,23 @@ def configure_gemini(api_key: str | None, model_name: str | None) -> str:
    # Check for empty string "" as well as None
    if not api_key:
        return "⚠️ Gemini API key is not set."
-   if not
-
+   # Check if model_name is None or not a valid key in GEMINI_MODELS
+   if not model_name or model_name not in GEMINI_MODELS:
+       return "⚠️ Please select a valid Gemini model."
    try:
        genai.configure(api_key=api_key)
        # Attempt a simple call to verify credentials and model availability
        # This will raise an exception if the key is invalid or model not found
        genai.GenerativeModel(model_name).generate_content("ping", stream=False)
        # This message indicates the API call *for configuration check* was successful
-       return f"✅ Gemini configured successfully with **{GEMINI_MODELS
+       return f"✅ Gemini configured successfully with **{GEMINI_MODELS[model_name][0]}**."
    except Exception as e:
        # This message indicates the API call *for configuration check* failed
        return f"❌ Error configuring Gemini: {e}"

def get_model_description(model_name: str | None) -> str:
    """Retrieves the description for a given model name."""
-   if model_name is None:
+   if model_name is None or model_name not in GEMINI_MODELS:
        return "Select a model to see its description."
    # Use .get with a default value to handle cases where the key might not be found
    return GEMINI_MODELS.get(model_name, (model_name, "No description available."))[1]
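As a quick illustration (not part of the commit), the new validation paths return early, before any API call is attempted; assuming configure_gemini and get_model_description are imported from app.py:

print(configure_gemini(None, "gemini-2.0-flash"))      # "⚠️ Gemini API key is not set."
print(configure_gemini("some-key", "not-a-model"))     # "⚠️ Please select a valid Gemini model."
print(get_model_description("not-a-model"))            # "Select a model to see its description."
print(get_model_description("gemini-2.0-flash-lite"))  # "Cost efficiency and low latency."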
@@ -255,9 +256,8 @@ def greet():
    return [{"role": "assistant", "content": "Welcome! Please log in to Hugging Face and provide your Google AI Studio API key to start building Spaces. Once ready, type 'generate me a gradio app called myapp' or 'create' to begin."}]

# Helper function to update send button interactivity based on prereqs
-#
-
-def check_send_button_ready(hf_profile: gr.OAuthProfile | None, hf_token: gr.OAuthToken | None, gemini_key: str | None, gemini_model: str | None, *args, **kwargs) -> gr.update:
+# This function has the clean signature it expects. Wrappers handle Gradio's argument passing.
+def check_send_button_ready(hf_profile: gr.OAuthProfile | None, hf_token: gr.OAuthToken | None, gemini_key: str | None, gemini_model: str | None) -> gr.update:
    """Checks if HF login and Gemini configuration are complete and returns update for button interactivity."""
    # --- START ENHANCED DEBUGGING LOGS ---
    print("\n--- check_send_button_ready START ---")
@@ -267,8 +267,6 @@ def check_send_button_ready(hf_profile: gr.OAuthProfile | None, hf_token: gr.OAuthToken | None, gemini_key: str | None, gemini_model: str | None, *args, **kwargs) -> gr.update:
    api_key_display = gemini_key[:5] if isinstance(gemini_key, str) and gemini_key else ('Empty String' if isinstance(gemini_key, str) and gemini_key == "" else 'None')
    print(f" Received gemini_key: Value starts with '{api_key_display}'")
    print(f" Received gemini_model: {gemini_model}")
-   print(f" Received extra args: {args}") # Added debug for extra args
-   print(f" Received extra kwargs: {kwargs}") # Added debug for extra kwargs
    # --- END ENHANCED DEBUGGING LOGS ---

    is_logged_in = hf_profile is not None and hf_token is not None
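check_send_button_ready ultimately returns gr.update(interactive=...) for the Send button (see the next hunk). A standalone sketch of that Gradio pattern, with hypothetical component names rather than code from this Space:

import gradio as gr

def toggle_send(ready: bool):
    # gr.update(...) changes only the listed properties of the output component
    return gr.update(interactive=ready)

with gr.Blocks() as demo:
    ready_box = gr.Checkbox(label="Prerequisites met")  # hypothetical stand-in for the real prerequisite states
    send = gr.Button("Send", interactive=False)
    ready_box.change(toggle_send, inputs=ready_box, outputs=send)
# demo.launch()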
@@ -281,27 +279,87 @@ def check_send_button_ready(hf_profile: gr.OAuthProfile | None, hf_token: gr.OAuthToken | None, gemini_key: str | None, gemini_model: str | None, *args, **kwargs) -> gr.update:

    return gr.update(interactive=is_ready)

-#
-#
-
-
-
-#
-
-def
-
-
-
-
-
-
+# --- Wrappers to handle Gradio's argument passing in event chains ---
+# These wrappers accept whatever Gradio passes (*args, **kwargs) and call the target function
+# with the specific arguments it expects, extracted from *args based on the expected call signature.
+
+# Wrapper for functions called in .then() chains with specific inputs list: expects (prev_output, *input_values)
+# e.g., .then(wrapper, inputs=[s1, s2]) -> wrapper receives (prev_out, s1_val, s2_val)
+def wrapper_from_then_inputs(func, num_inputs):
+    def wrapped(*args, **kwargs):
+        # We expect num_inputs values at the end of *args, after prev_output (index 0)
+        if len(args) > num_inputs:
+            required_args = args[-num_inputs:]
+            try:
+                return func(*required_args)
+            except Exception as e:
+                print(f"Error calling wrapped function {func.__name__} with args {required_args}: {e}")
+                # Provide a fallback or re-raise depending on context
+                if func == configure_gemini: return f"❌ Error configuring Gemini: {e}"
+                if func == get_model_description: return f"Error getting description: {e}"
+                if func == check_send_button_ready: return gr.update(interactive=False)
+                raise # Re-raise if no specific fallback
+        else:
+            print(f"Warning: wrapper_from_then_inputs for {func.__name__} received unexpected args (expecting at least {num_inputs+1}): {args}")
+            # Provide a fallback or re-raise
+            if func == configure_gemini: return "❌ Error configuring Gemini: unexpected arguments received."
+            if func == get_model_description: return "No description available (unexpected arguments received)."
+            if func == check_send_button_ready: return gr.update(interactive=False)
+            raise ValueError(f"Unexpected args received for {func.__name__}: {args}")
+    return wrapped
+
+# Wrapper for functions called by .change() trigger with specific inputs list: expects (changed_value, *input_values)
+# e.g., component.change(wrapper, inputs=[s1, s2]) -> wrapper receives (changed_val, s1_val, s2_val)
+def wrapper_from_change_inputs(func, num_inputs):
+    def wrapped(*args, **kwargs):
+        # We expect num_inputs values at the end of *args, after the changed_value (index 0)
+        if len(args) > num_inputs:
+            required_args = args[-num_inputs:]
+            try:
+                return func(*required_args)
+            except Exception as e:
+                print(f"Error calling wrapped function {func.__name__} with args {required_args}: {e}")
+                if func == check_send_button_ready: return gr.update(interactive=False)
+                raise # Re-raise if no specific fallback
+        else:
+            print(f"Warning: wrapper_from_change_inputs for {func.__name__} received unexpected args (expecting at least {num_inputs+1}): {args}")
+            if func == check_send_button_ready: return gr.update(interactive=False)
+            raise ValueError(f"Unexpected args received for {func.__name__}: {args}")
+    return wrapped
+
+
+# Wrapper for functions called in .then() chains with inputs=None: expects (prev_output,)
+# e.g., .then(wrapper, inputs=None) -> wrapper receives (prev_out,)
+def wrapper_from_prev_output(func):
+    def wrapped(*args, **kwargs):
+        # We expect only prev_output, or sometimes nothing if the chain starts
+        if len(args) >= 0: # Just accept anything here
+            try:
+                # The target function expects 0 args, so call it with no args
+                return func()
+            except Exception as e:
+                print(f"Error calling wrapped function {func.__name__} with args {args}: {e}")
+                # Provide a fallback or re-raise
+                if func == greet: return [{"role": "assistant", "content": f"❌ Error loading initial message: {e}"}]
+                raise # Re-raise if no specific fallback
+        else:
+            print(f"Warning: wrapper_from_prev_output for {func.__name__} received unexpected args: {args}")
+            if func == greet: return [{"role": "assistant", "content": "❌ Error loading initial message: unexpected arguments received."}]
+            raise ValueError(f"Unexpected args received for {func.__name__}: {args}")
+    return wrapped
+
+# Instantiate specific wrappers using the generic ones
+wrapper_check_button_change = wrapper_from_change_inputs(check_send_button_ready, 4) # Expects (changed, s1, s2, s3, s4)
+wrapper_check_button_then = wrapper_from_then_inputs(check_send_button_ready, 4) # Expects (prev_out, s1, s2, s3, s4)
+
+wrapper_configure_gemini_then = wrapper_from_then_inputs(configure_gemini, 2) # Expects (prev_out, s1, s2) -> api_key, model_name
+wrapper_get_model_description_then = wrapper_from_then_inputs(get_model_description, 1) # Expects (prev_out, s1) -> model_name
+
+wrapper_greet_then = wrapper_from_prev_output(greet) # Expects (prev_out,), needs 0 args


# This is the main generator function for the workflow, triggered by the 'Send' button
-#
-# It MUST also yield/return ALL state variables in the same order they appear in the `outputs` list of the `.click()` event.
-# Added back *args, **kwargs to the generator function signature and yield for robustness,
-# as the previous attempt suggested this might be necessary for state consistency within the generator's lifecycle.
+# Inputs and Outputs list must match exactly. The generator receives values from the inputs list.
def ai_workflow_chat(
    message: str,
    history: list[dict],
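A standalone sketch (not part of the commit) of what these wrappers do with the extra leading value a chained Gradio event may pass: only the trailing num_inputs positional arguments are forwarded to the wrapped function. The shout helper here is hypothetical:

def shout(text: str) -> str:
    return text.upper()

shout_then = wrapper_from_then_inputs(shout, 1)        # expects (prev_output, text)
print(shout_then("ignored previous output", "hello"))  # -> "HELLO"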
@@ -323,8 +381,8 @@ def ai_workflow_chat(
    generated_code_state: str | None,
    use_grounding_state: bool, # Value from use_grounding_checkbox
    # Absorb potential extra args passed by Gradio event listeners (important for generators)
-   *args,
-   **kwargs
+   *args, # Generators might receive extra args, need to accept them but don't need to yield unless they are state
+   **kwargs # Generators might receive extra kwargs
) -> tuple[
    list[dict], # 0: Updated chat history (for chatbot)
    str | None, # 1: Updated repo_id (for repo_id state)
@@ -373,8 +431,8 @@ def ai_workflow_chat(
    print(f" app_description_state: {app_description_state}")
    print(f" repo_name_state: {repo_name_state}")
    print(f" generated_code_state: {'Present' if generated_code_state is not None else 'None'}")
-
-
+   print(f" *args (unexpected by generator): {args}") # Added debug for unexpected args
+   print(f" **kwargs (unexpected by generator): {kwargs}") # Added debug for unexpected kwargs
    print("--- END DEBUGGING ai_workflow_chat inputs ---\n")

@@ -1011,8 +1069,7 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:

    # --- Define Event Handlers and Chains AFTER all components and required lists are defined ---

-   # Define the inputs used for checking prerequisites
-   # This list provides the values to the check_send_button_ready function
+   # Define the inputs used for checking prerequisites (These are State components)
    send_button_interactive_binding_inputs = [
        hf_profile,
        hf_token,
@@ -1024,61 +1081,73 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:


    # Trigger check_send_button_ready whenever any prerequisite state changes
-   #
-   # The check_send_button_ready function signature now handles the implicit first argument.
+   # Use the specific change wrapper which expects (changed_value, *input_values)
    hf_profile.change(
-
+       wrapper_check_button_change,
+       inputs=send_button_interactive_binding_inputs, # Pass all 4 prerequisite states
+       outputs=send_button_update_output, # Update only the send button
+   )
+   hf_token.change(
+       wrapper_check_button_change,
        inputs=send_button_interactive_binding_inputs,
        outputs=send_button_update_output,
    )
-
-
+   # When gemini_api_key_state changes (updated by gemini_input.change), check button readiness
+   gemini_api_key_state.change(
+       wrapper_check_button_change,
        inputs=send_button_interactive_binding_inputs,
        outputs=send_button_update_output,
    )
-   #
-
-
-
+   # When gemini_model_state changes (updated by model_selector.change), check button readiness
+   gemini_model_state.change(
+       wrapper_check_button_change,
+       inputs=send_button_interactive_binding_inputs,
+       outputs=send_button_update_output,
+   )
+
+
+   # Handle login button click: Update profile/token state -> Their .change handlers trigger check_send_button_ready
+   login_btn.click(
+       lambda x: (x[0], x[1]), # Lambda takes the LoginButton output (profile, token tuple) and returns it
+       inputs=[login_btn], # Pass the LoginButton itself to get its output
+       outputs=[hf_profile, hf_token] # Update state variables
+   )
+
+   # Handle Gemini Key Input change: Update key state -> Configure Gemini status -> Update send button state
    gemini_input.change(
-
-
-
+       # Lambda receives the new value of gemini_input (1 arg) because inputs=None (implied)
+       lambda new_key_value: new_key_value,
+       inputs=None, # Only need the new value of the changed component
+       outputs=[gemini_api_key_state] # This output becomes the implicit first arg for the next .then() in this chain
    ).then(
        # Configure Gemini using the updated state variables
-
+       # Use the then_inputs wrapper which expects (prev_output, api_key_val, model_name_val)
+       wrapper_configure_gemini_then,
        inputs=[gemini_api_key_state, gemini_model_state], # Explicitly pass the required states
-       outputs=[gemini_status] # Update Gemini status display
-   ).then(
-       # Trigger check_send_button_ready after status update
-       # check_send_button_ready signature now handles the implicit previous output + explicit inputs
-       check_send_button_ready,
-       inputs=send_button_interactive_binding_inputs,
-       outputs=send_button_update_output
+       outputs=[gemini_status] # Update Gemini status display. This output becomes the implicit first arg for the next .then()
    )
+   # The .then chain continues from the outputs of the configure_gemini call, handled by gemini_api_key_state.change handler above

    # Handle Gemini Model Selector change: Update model state -> Update description -> Configure Gemini status -> Update send button state
    model_selector.change(
-
-
-
+       # Lambda receives the new value of model_selector (1 arg)
+       lambda new_model_name: new_model_name,
+       inputs=None, # Only need the new value of the changed component
+       outputs=[gemini_model_state] # This output becomes the implicit first arg for the next .then()
    ).then(
        # Update the model description display
-
+       # Use the then_inputs wrapper which expects (prev_output, model_name_val)
+       wrapper_get_model_description_then,
        inputs=[gemini_model_state], # Get the new state value
-       outputs=[model_description_text] # Update description UI
+       outputs=[model_description_text] # Update description UI. This output becomes implicit first arg for next .then()
    ).then(
        # Configure Gemini using the updated state variables
-
+       # Use the then_inputs wrapper which expects (prev_output, api_key_val, model_name_val)
+       wrapper_configure_gemini_then,
        inputs=[gemini_api_key_state, gemini_model_state], # Explicitly pass the required states
-       outputs=[gemini_status] # Update Gemini status display
-   ).then(
-       # Trigger check_send_button_ready after status update
-       # check_send_button_ready signature now handles the implicit previous output + explicit inputs
-       check_send_button_ready,
-       inputs=send_button_interactive_binding_inputs,
-       outputs=send_button_update_output
+       outputs=[gemini_status] # Update Gemini status display. This output becomes the implicit first arg for the next .then()
    )
+   # The .then chain continues from the outputs of the configure_gemini call, handled by gemini_model_state.change handler above


    # Handle Grounding checkbox change: update grounding state
@@ -1092,17 +1161,24 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
    )

    # Link Workflow State variable change to UI status display
-
+   # Lambda receives the new state value (1 arg) because inputs=None
+   workflow.change(lambda new_state_value: new_state_value, inputs=None, outputs=status_text)

    # Link Repo ID State variable change to UI status display
-   #
-
+   # Lambda receives the new state value (1 arg) because inputs=None
+   # The warning about receiving 0 args persists, likely ignorable for this lambda
+   repo_id.change(lambda new_repo_id_value: new_repo_id_value if new_repo_id_value else "None", inputs=None, outputs=repo_id_text)


    # The main event handler for the Send button (generator)
-   # This
+   # This .click() event triggers the ai_workflow_chat generator function
+   # Inputs are read from UI components AND State variables
+   # Outputs are updated by the values yielded from the generator
+   # Ensure inputs and outputs match the ai_workflow_chat signature and yield tuple EXACTLY.
+   # This call is direct, not in a .then() chain, so it does NOT receive a prev_output arg.
+   # It receives args only from the inputs list.
    send_btn.click(
-       ai_workflow_chat, # The generator function to run (signature
+       ai_workflow_chat, # The generator function to run (signature handles potential extra args, just in case)
        inputs=[
            user_input, chatbot, # UI component inputs (message, current chat history)
            hf_profile, hf_token, # HF State variables
@@ -1132,31 +1208,32 @@ with gr.Blocks(title="AI-Powered HF Space App Builder") as ai_builder_tab:
    # This chain runs once when the app loads
    ai_builder_tab.load(
        # Action 1: Show profile (loads cached login if available)
-
+       # show_profile expects 1 arg (profile) or None. It receives 1 from load. Correct.
+       show_profile,
        inputs=None,
        outputs=login_status # Updates UI. This output becomes the implicit first arg for the next .then()
    ).then(
        # Action 2: Configure Gemini using initial state
-       #
-
+       # Use the then_inputs wrapper which expects (prev_output, api_key_val, model_name_val)
+       wrapper_configure_gemini_then,
        inputs=[gemini_api_key_state, gemini_model_state], # Explicitly pass the required states
        outputs=[gemini_status] # Update Gemini status display. This output becomes the implicit first arg for the next .then()
    ).then(
        # Action 3: After initial load checks, update the button state based on initial states
-       #
-
-       inputs=send_button_interactive_binding_inputs,
+       # Use the then_inputs wrapper which expects (prev_output, *prereq_state_values)
+       wrapper_check_button_then,
+       inputs=send_button_interactive_binding_inputs, # Pass all 4 prerequisite states
        outputs=send_button_update_output, # Update the send button. This output becomes implicit first arg for next .then()
    ).then(
        # Action 4: Update the model description text based on the default selected model
-       # Use the wrapper
-
+       # Use the then_inputs wrapper which expects (prev_output, model_name_val)
+       wrapper_get_model_description_then,
        inputs=[gemini_model_state], # Get the default model name from state
-       outputs=[model_description_text] # Update description UI
+       outputs=[model_description_text] # Update description UI. This output becomes implicit first arg for next .then()
    ).then(
        # Action 5: Add the initial welcome message to the chat history
-       #
-
+       # Use the prev_output wrapper which expects (prev_output,)
+       wrapper_greet_then,
        inputs=None, # Greet takes no explicit inputs
        outputs=chatbot # Updates the chatbot display
    )