MilanM committed
Commit fd936d4 · verified · 1 Parent(s): 8e508a3

Create app_v2_backup.py

Files changed (1)
  1. app_v2_backup.py +1348 -0
app_v2_backup.py ADDED
@@ -0,0 +1,1348 @@
1
+ import marimo
2
+
3
+ __generated_with = "0.11.16"
4
+ app = marimo.App(width="medium")
5
+
6
+
7
+ @app.cell
8
+ def _():
9
+ import marimo as mo
10
+ import os
11
+ return mo, os
12
+
13
+
14
+ @app.cell
15
+ def _():
16
+ def get_markdown_content(file_path):
17
+ with open(file_path, 'r', encoding='utf-8') as file:
18
+ content = file.read()
19
+ return content
20
+ return (get_markdown_content,)
21
+
22
+
23
+ @app.cell
24
+ def _(get_markdown_content, mo):
25
+ intro_text = get_markdown_content('intro_markdown/intro.md')
26
+ intro_marimo = get_markdown_content('intro_markdown/intro_marimo.md')
27
+ intro_notebook = get_markdown_content('intro_markdown/intro_notebook.md')
28
+ intro_comparison = get_markdown_content('intro_markdown/intro_comparison.md')
29
+
30
+ intro = mo.carousel([
31
+ mo.md(f"{intro_text}"),
32
+ mo.md(f"{intro_marimo}"),
33
+ mo.md(f"{intro_notebook}"),
34
+ mo.md(f"{intro_comparison}"),
35
+ ])
36
+
37
+ mo.accordion({"## Notebook Introduction":intro})
38
+ return intro, intro_comparison, intro_marimo, intro_notebook, intro_text
39
+
40
+
41
+ @app.cell
42
+ def _(os):
43
+ ### Imports
44
+ from typing import (
45
+ Any, Dict, List, Optional, Pattern, Set, Union, Tuple
46
+ )
47
+ from pathlib import Path
48
+ from urllib.request import urlopen
49
+ # from rich.markdown import Markdown as Markd
50
+ from rich.text import Text
51
+ from rich import print
52
+ from tqdm import tqdm
53
+ from enum import Enum
54
+ import pandas as pd
55
+ import tempfile
56
+ import requests
57
+ import getpass
58
+ import urllib3
59
+ import base64
60
+ import time
61
+ import json
62
+ import uuid
63
+ import ssl
64
+ import ast
65
+ import re
66
+
67
+ pd.set_option('display.max_columns', None)
68
+ pd.set_option('display.max_rows', None)
69
+ pd.set_option('display.max_colwidth', None)
70
+ pd.set_option('display.width', None)
71
+
72
+ # Set explicit temporary directory
73
+ os.environ['TMPDIR'] = '/tmp'
74
+
75
+ # Make sure Python's tempfile module also uses this directory
76
+ tempfile.tempdir = '/tmp'
77
+ return (
78
+ Any,
79
+ Dict,
80
+ Enum,
81
+ List,
82
+ Optional,
83
+ Path,
84
+ Pattern,
85
+ Set,
86
+ Text,
87
+ Tuple,
88
+ Union,
89
+ ast,
90
+ base64,
91
+ getpass,
92
+ json,
93
+ pd,
94
+ print,
95
+ re,
96
+ requests,
97
+ ssl,
98
+ tempfile,
99
+ time,
100
+ tqdm,
101
+ urllib3,
102
+ urlopen,
103
+ uuid,
104
+ )
105
+
106
+
107
+ @app.cell
108
+ def _(mo):
109
+ ### Credentials for the watsonx.ai SDK client
110
+
111
+ # Endpoints
112
+ wx_platform_url = "https://api.dataplatform.cloud.ibm.com"
113
+ regions = {
114
+ "US": "https://us-south.ml.cloud.ibm.com",
115
+ "EU": "https://eu-de.ml.cloud.ibm.com",
116
+ "GB": "https://eu-gb.ml.cloud.ibm.com",
117
+ "JP": "https://jp-tok.ml.cloud.ibm.com",
118
+ "AU": "https://au-syd.ml.cloud.ibm.com",
119
+ "CA": "https://ca-tor.ml.cloud.ibm.com"
120
+ }
121
+
122
+ # Create a form with multiple elements
123
+ client_instantiation_form = (
124
+ mo.md('''
125
+ ###**watsonx.ai credentials:**
126
+
127
+ {wx_region}
128
+
129
+ {wx_api_key}
130
+
131
+ {space_id}
132
+ ''').style(max_height="300px", overflow="auto", border_color="blue")
133
+ .batch(
134
+ wx_region = mo.ui.dropdown(regions, label="Select your watsonx.ai region:", value="US", searchable=True),
135
+ wx_api_key = mo.ui.text(placeholder="Add your IBM Cloud api-key...", label="IBM Cloud Api-key:", kind="password"),
136
+ # project_id = mo.ui.text(placeholder="Add your watsonx.ai project_id...", label="Project_ID:", kind="text"),
137
+ space_id = mo.ui.text(placeholder="Add your watsonx.ai space_id...", label="Space_ID:", kind="text")
138
+ ,)
139
+ .form(show_clear_button=True, bordered=False)
140
+ )
141
+
142
+
143
+ # client_instantiation_form
144
+ return client_instantiation_form, regions, wx_platform_url
145
+
146
+
147
+ @app.cell
148
+ def _(client_instantiation_form, mo):
149
+ from ibm_watsonx_ai import APIClient, Credentials
150
+
151
+ def setup_task_credentials(deployment_client):
152
+ # Get existing task credentials
153
+ existing_credentials = deployment_client.task_credentials.get_details()
154
+
155
+ # Delete existing credentials if any
156
+ if "resources" in existing_credentials and existing_credentials["resources"]:
157
+ for cred in existing_credentials["resources"]:
158
+ cred_id = deployment_client.task_credentials.get_id(cred)
159
+ deployment_client.task_credentials.delete(cred_id)
160
+
161
+ # Store new credentials
162
+ return deployment_client.task_credentials.store()
163
+
164
+ if client_instantiation_form.value:
165
+ ### Instantiate the watsonx.ai client
166
+ wx_credentials = Credentials(
167
+ url=client_instantiation_form.value["wx_region"],
168
+ api_key=client_instantiation_form.value["wx_api_key"]
169
+ )
170
+
171
+ # project_client = APIClient(credentials=wx_credentials, project_id=client_instantiation_form.value["project_id"])
172
+ deployment_client = APIClient(credentials=wx_credentials, space_id=client_instantiation_form.value["space_id"])
173
+
174
+ task_credentials_details = setup_task_credentials(deployment_client)
175
+ else:
176
+ # project_client = None
177
+ deployment_client = None
178
+ task_credentials_details = None
179
+
180
+ template_variant = mo.ui.dropdown(["Base","Stream Files to IBM COS [Example]"], label="Code Template:", value="Base")
181
+
182
+ if deployment_client is not None:
183
+ client_callout_kind = "success"
184
+ else:
185
+ client_callout_kind = "neutral"
186
+
187
+ client_callout = mo.callout(template_variant, kind=client_callout_kind)
188
+
189
+ # client_callout
190
+ return (
191
+ APIClient,
192
+ Credentials,
193
+ client_callout,
194
+ client_callout_kind,
195
+ deployment_client,
196
+ setup_task_credentials,
197
+ task_credentials_details,
198
+ template_variant,
199
+ wx_credentials,
200
+ )
201
+
202
+
203
+ @app.cell
204
+ def _(
205
+ client_callout,
206
+ client_instantiation_form,
207
+ deploy_fnc,
208
+ deployment_definition,
209
+ fm,
210
+ function_editor,
211
+ hw_selection_table,
212
+ mo,
213
+ purge_tabs,
214
+ sc_m,
215
+ schema_editors,
216
+ selection_table,
217
+ upload_func,
218
+ ):
219
+ s1 = mo.md(f'''
220
+ ###**Instantiate your watsonx.ai client:**
221
+
222
+ 1. Select a region from the dropdown menu
223
+
224
 + 2. Provide an IBM Cloud API key and a watsonx.ai deployment space ID
225
+
226
+ 3. Once you submit, the area with the code template will turn green if successful
227
+
228
 + 4. Select either the base template (a baseline format) or the example code function template
229
+
230
+ ---
231
+
232
+ {client_instantiation_form}
233
+
234
+ ---
235
+
236
+ {client_callout}
237
+
238
+ ''')
239
+
240
+ sc_tabs = mo.ui.tabs(
241
+ {
242
+ "Schema Option Selection": sc_m,
243
+ "Schema Definition": mo.md(f"""
244
+ ####**Edit the schema definitions you selected in the previous tab.**<br>
245
+ {schema_editors}"""),
246
+ }
247
+ )
248
+
249
+ s2 = mo.md(f'''###**Create your function from the template:**
250
+
251
+ 1. Use the code editor window to create a function to deploy
252
+ <br>
253
+ The function must:
254
+ <br>
255
+ --- Include a payload and score element
256
+ <br>
257
 + --- Have the same function name in both the `score = <name>()` segment and the Function Name input field below
258
+ <br>
259
+ --- Additional details can be found here -> [watsonx.ai - Writing deployable Python functions
260
+ ](https://dataplatform.cloud.ibm.com/docs/content/wsj/analyze-data/ml-deploy-py-function-write.html?utm_medium=Exinfluencer&utm_source=ibm_developer&utm_content=in_content_link&utm_term=10006555&utm_id=blogs_awb-tekton-optimizations-for-kubeflow-pipelines-2-0&context=wx&audience=wdp)
261
+
262
 + 2. Click submit, then proceed to select whether you wish to add:
263
+ <br>
264
+ --- An input schema (describing the format of the variables the function takes) **[Optional]**
265
+ <br>
266
+ --- An output schema (describing the format of the output results the function returns) **[Optional]**
267
+ <br>
268
 + --- A sample input example (showing an example mapping of the input and output schemas to actual values) **[Optional]**
269
+
270
 + 3. Fill in the function name field **(must be exactly the same as in the function editor)**
271
+
272
 + 4. Add a description and metadata tags **[Optional]**
273
+
274
+ ---
275
+
276
+ {function_editor}
277
+
278
+ ---
279
+
280
+ {sc_tabs}
281
+
282
+ ---
283
+
284
+ {fm}
285
+
286
+ ''')
287
+
288
+ s3 = mo.md(f'''
289
+ ###**Review and Upload your function**
290
+
291
+ 1. Review the function metadata specs JSON
292
+
293
 + 2. Select a software specification if necessary (the default for Python functions is pre-selected); this is the Python runtime environment your function will run in. watsonx.ai environments come pre-packaged with many libraries; if you need additional ones, install them inside the function with a `subprocess.check_output('pip install <package_name>', shell=True)` call.
294
+
295
 + 3. Once you are satisfied, click the upload function button and wait for the response.
296
+
297
+ > If you see no table of software specs, you haven't activated your watsonx.ai client.
298
+
299
+ ---
300
+
301
+ {selection_table}
302
+
303
+ ---
304
+
305
+ {upload_func}
306
+
307
+ ''')
308
+
309
+ s4 = mo.md(f'''
310
+ ###**Deploy your function:**
311
+
312
+ 1. Select a hardware specification (vCPUs/GB) that you want your function deployed on
313
+ <br>
314
 + --- XXS and XS cost the same (0.5 CUH per hour), so XS is the better option
315
+ <br>
316
+ --- Select larger instances for more resource intensive tasks or runnable jobs
317
+
318
+ 2. Select the type of deployment:
319
+ <br>
320
 + --- Function (Online) for always-on endpoints - always available with low latency, but consumes resources continuously for every hour it is deployed.
321
+ <br>
322
 + --- Runnable Job (Batch) for runnable jobs - consumes resources only during job runs, but is less flexible to deploy.
323
+
324
 + 3. If you've selected Function, pick a globally unique deployment serving name (unique across all accounts, not just yours) that will appear in the endpoint URL.
325
+
326
 + 4. Once you are satisfied, click the deploy function button and wait for the response.
327
+
328
+ ---
329
+
330
+ {hw_selection_table}
331
+
332
+ ---
333
+
334
+ {deployment_definition}
335
+
336
+ ---
337
+
338
+ {deploy_fnc}
339
+
340
+ ''')
341
+
342
+ s5 = mo.md(f'''
343
+ ###**Helper Purge Functions:**
344
+
345
+ These functions help you retrieve and mass delete ***(WARNING: purges all at once)*** deployments, data assets or repository assets (functions, models, etc.) that you have in the deployment space. This is meant to support fast cleanup.
346
+
347
 + Select the tab based on what you want to delete, then click the buttons one by one, waiting for each response before clicking the next.
348
+
349
+ ---
350
+
351
+ {purge_tabs}
352
+
353
+ ''')
354
+
355
+ sections = mo.accordion(
356
+ {
357
+ "Section 1: **watsonx.ai Credentials**": s1,
358
+ "Section 2: **Function Creation**": s2,
359
+ "Section 3: **Function Upload**": s3,
360
+ "Section 4: **Function Deployment**": s4,
361
+ "Section 5: **Helper Functions**": s5,
362
+ },
363
+ multiple=True
364
+ )
365
+
366
+ sections
367
+ return s1, s2, s3, s4, s5, sc_tabs, sections
368
+
369
+
370
+ @app.cell
371
+ def _(mo, template_variant):
372
+ # Template for WatsonX.ai deployable function
373
+ if template_variant.value == "Stream Files to IBM COS [Example]":
374
+ with open("stream_files_to_cos.py", "r") as file:
375
+ template = file.read()
376
+ else:
377
+ template = '''def your_function_name():
378
+
379
+ import subprocess
380
+ subprocess.check_output('pip install gensim', shell=True)
381
+ import gensim
382
+
383
 + def score(payload):
384
+ message_from_input_payload = payload.get("input_data")[0].get("values")[0][0]
385
+ response_message = "Received message - {0}".format(message_from_input_payload)
386
+
387
+ # Score using the pre-defined model
388
+ score_response = {
389
+ 'predictions': [{'fields': ['Response_message_field', 'installed_lib_version'],
390
+ 'values': [[response_message, gensim.__version__]]
391
+ }]
392
+ }
393
+ return score_response
394
+
395
+ return score
396
+
397
+ score = your_function_name()
398
+ '''
399
+
400
+ function_editor = (
401
+ mo.md('''
402
+ #### **Create your function by editing the template:**
403
+
404
+ {editor}
405
+
406
+ ''')
407
+ .batch(
408
+ editor = mo.ui.code_editor(value=template, language="python", min_height=50)
409
+ )
410
+ .form(show_clear_button=True, bordered=False)
411
+ )
412
+
413
+ # function_editor
414
 + return function_editor, template
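 + # A minimal illustrative sketch (not taken from this notebook's UI) of the payload shape
 + # the score() function in the template above is written against, assuming the standard
 + # watsonx.ai "input_data"/"fields"/"values" scoring format; the field name and value
 + # below are placeholders:
 + #
 + # example_payload = {
 + #     "input_data": [
 + #         {"fields": ["message"], "values": [["Hello watsonx.ai"]]}
 + #     ]
 + # }
 + # # score(example_payload) would then return a dict shaped like:
 + # # {"predictions": [{"fields": [...], "values": [[...]]}]}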
415
+
416
+
417
+ @app.cell
418
+ def _(function_editor, mo, os):
419
+ if function_editor.value:
420
+ # Get the edited code from the function editor
421
+ code = function_editor.value['editor']
422
+ # Create a namespace to execute the code in
423
+ namespace = {}
424
+ # Execute the code
425
+ exec(code, namespace)
426
+
427
+ # Find the first function defined in the namespace
428
+ function_name = None
429
+ for name, obj in namespace.items():
430
+ if callable(obj) and name != "__builtins__":
431
+ function_name = name
432
+ break
433
+
434
+ if function_name:
435
+ # Instantiate the deployable function
436
+ deployable_function = namespace[function_name]
437
+ # Now deployable_function contains the score function
438
+ mo.md(f"Created deployable function from '{function_name}'")
439
+ # Create the directory if it doesn't exist
440
+ save_dir = "/tmp/notebook_functions"
441
+ os.makedirs(save_dir, exist_ok=True)
442
+ # Save the function code to a file
443
+ file_path = os.path.join(save_dir, f"{function_name}.py")
444
+ with open(file_path, "w") as f:
445
+ f.write(code)
446
+ else:
447
+ mo.md("No function found in the editor code")
448
+ return (
449
+ code,
450
+ deployable_function,
451
+ f,
452
+ file_path,
453
+ function_name,
454
+ name,
455
+ namespace,
456
+ obj,
457
+ save_dir,
458
+ )
459
+
460
+
461
+ @app.cell
462
+ def _(deployment_client, mo, pd):
463
+ if deployment_client:
464
+ supported_specs = deployment_client.software_specifications.list()[
465
+ deployment_client.software_specifications.list()['STATE'] == 'supported'
466
+ ]
467
+
468
+ # Reset the index to start from 0
469
+ supported_specs = supported_specs.reset_index(drop=True)
470
+
471
+ # Create a mapping dictionary for framework names based on software specifications
472
+ framework_mapping = {
473
+ "tensorflow_rt24.1-py3.11": "TensorFlow",
474
+ "pytorch-onnx_rt24.1-py3.11": "PyTorch",
475
+ "onnxruntime_opset_19": "ONNX or ONNXRuntime",
476
+ "runtime-24.1-py3.11": "AI Services/Python Functions/Python Scripts",
477
+ "autoai-ts_rt24.1-py3.11": "AutoAI",
478
+ "autoai-kb_rt24.1-py3.11": "AutoAI",
479
+ "runtime-24.1-py3.11-cuda": "CUDA-enabled (GPU) Python Runtime",
480
+ "runtime-24.1-r4.3": "R Runtime 4.3",
481
+ "spark-mllib_3.4": "Apache Spark 3.4",
482
+ "autoai-rag_rt24.1-py3.11": "AutoAI RAG"
483
+ }
484
+
485
+ # Define the preferred order for items to appear at the top
486
+ preferred_order = [
487
+ "runtime-24.1-py3.11",
488
+ "runtime-24.1-py3.11-cuda",
489
+ "runtime-24.1-r4.3",
490
+ "ai-service-v5-software-specification",
491
+ "autoai-rag_rt24.1-py3.11",
492
+ "autoai-ts_rt24.1-py3.11",
493
+ "autoai-kb_rt24.1-py3.11",
494
+ "tensorflow_rt24.1-py3.11",
495
+ "pytorch-onnx_rt24.1-py3.11",
496
+ "onnxruntime_opset_19",
497
+ "spark-mllib_3.4",
498
+ ]
499
+
500
+ # Create a new column for sorting
501
+ supported_specs['SORT_ORDER'] = supported_specs['NAME'].apply(
502
+ lambda x: preferred_order.index(x) if x in preferred_order else len(preferred_order)
503
+ )
504
+
505
+ # Sort the DataFrame by the new column
506
+ supported_specs = supported_specs.sort_values('SORT_ORDER').reset_index(drop=True)
507
+
508
+ # Drop the sorting column as it's no longer needed
509
+ supported_specs = supported_specs.drop(columns=['SORT_ORDER'])
510
+
511
+ # Drop the REPLACEMENT column if it exists and add NOTES column
512
+ if 'REPLACEMENT' in supported_specs.columns:
513
+ supported_specs = supported_specs.drop(columns=['REPLACEMENT'])
514
+
515
+ # Add NOTES column with framework information
516
+ supported_specs['NOTES'] = supported_specs['NAME'].map(framework_mapping).fillna("Other")
517
+
518
+ # Create a table with single-row selection
519
+ selection_table = mo.ui.table(
520
+ supported_specs,
521
+ selection="single", # Only allow selecting one row
522
+ label="#### **Select a supported software_spec runtime for your function asset** (For Python Functions select - *'runtime-24.1-py3.11'* ):",
523
+ initial_selection=[0], # Now selecting the first row, which should be runtime-24.1-py3.11
524
+ page_size=6
525
+ )
526
+ else:
527
+ sel_df = pd.DataFrame(
528
+ data=[["ID", "Activate deployment_client."]],
529
+ columns=["ID", "VALUE"]
530
+ )
531
+
532
+ selection_table = mo.ui.table(
533
+ sel_df,
534
+ selection="single", # Only allow selecting one row
535
+ label="You haven't activated the Deployment_Client",
536
+ initial_selection=[0]
537
+ )
538
+
539
+ # # Display the table
540
+ # mo.md(f"""---
541
+ # <br>
542
+ # <br>
543
+ # {selection_table}
544
+ # <br>
545
+ # <br>
546
+ # ---
547
+ # <br>
548
+ # <br>
549
+ # """)
550
+ return (
551
+ framework_mapping,
552
+ preferred_order,
553
+ sel_df,
554
+ selection_table,
555
+ supported_specs,
556
+ )
557
+
558
+
559
+ @app.cell
560
+ def _(mo):
561
+ input_schema_checkbox = mo.ui.checkbox(label="Add input schema (optional)")
562
+ output_schema_checkbox = mo.ui.checkbox(label="Add output schema (optional)")
563
+ sample_input_checkbox = mo.ui.checkbox(label="Add sample input example (optional)")
564
+ return input_schema_checkbox, output_schema_checkbox, sample_input_checkbox
565
+
566
+
567
+ @app.cell
568
+ def _(
569
+ input_schema_checkbox,
570
+ mo,
571
+ output_schema_checkbox,
572
+ sample_input_checkbox,
573
+ selection_table,
574
+ template_variant,
575
+ ):
576
+ if selection_table.value['ID'].iloc[0]:
577
+ # Create the input fields
578
+ if template_variant.value == "Stream Files to IBM COS [Example]":
579
+ fnc_nm = "stream_file_to_cos"
580
+ else:
581
+ fnc_nm = "your_function_name"
582
+
583
+ uploaded_function_name = mo.ui.text(placeholder="<Must be the same as the name in editor>", label="Function Name:", kind="text", value=f"{fnc_nm}", full_width=False)
584
+ tags_editor = mo.ui.array(
585
+ [mo.ui.text(placeholder="Metadata Tags..."), mo.ui.text(), mo.ui.text()],
586
+ label="Optional Metadata Tags"
587
+ )
588
+ software_spec = selection_table.value['ID'].iloc[0]
589
+
590
+ description_input = mo.ui.text_area(
591
+ placeholder="Write a description for your function...)",
592
+ label="Description",
593
+ max_length=256,
594
+ rows=5,
595
+ full_width=True
596
+ )
597
+
598
+
599
+ func_metadata=mo.hstack([
600
+ description_input,
601
+ mo.hstack([
602
+ uploaded_function_name,
603
+ tags_editor,
604
+ ], justify="start", gap=1, align="start", wrap=True)
605
+ ],
606
+ widths=[0.6,0.4],
607
+ gap=2.75
608
+ )
609
+
610
+ schema_metadata=mo.hstack([
611
+ input_schema_checkbox,
612
+ output_schema_checkbox,
613
+ sample_input_checkbox
614
+ ],
615
+ justify="center", gap=1, align="center", wrap=True
616
+ )
617
+
618
+ # Display the metadata inputs
619
+ # mo.vstack([
620
+ # func_metadata,
621
+ # mo.md("**Make sure to click the checkboxes before filling in descriptions and tags or they will reset.**"),
622
+ # schema_metadata
623
+ # ],
624
+ # align="center",
625
+ # gap=2
626
+ # )
627
+ fm = mo.vstack([
628
+ func_metadata,
629
+ ],
630
+ align="center",
631
+ gap=2
632
+ )
633
+ sc_m = mo.vstack([
634
+ schema_metadata,
635
+ mo.md("**Make sure to select the checkbox options before filling in descriptions and tags or they will reset.**")
636
+ ],
637
+ align="center",
638
+ gap=2
639
+ )
640
+ return (
641
+ description_input,
642
+ fm,
643
+ fnc_nm,
644
+ func_metadata,
645
+ sc_m,
646
+ schema_metadata,
647
+ software_spec,
648
+ tags_editor,
649
+ uploaded_function_name,
650
+ )
651
+
652
+
653
+ @app.cell
654
+ def _(json, mo, template_variant):
655
+ if template_variant.value == "Stream Files to IBM COS [Example]":
656
+ from cos_stream_schema_examples import input_schema, output_schema, sample_input
657
+ else:
658
+ input_schema = [
659
+ {
660
+ 'id': '1',
661
+ 'type': 'struct',
662
+ 'fields': [
663
+ {
664
+ 'name': '<variable name 1>',
665
+ 'type': 'string',
666
+ 'nullable': False,
667
+ 'metadata': {}
668
+ },
669
+ {
670
+ 'name': '<variable name 2>',
671
+ 'type': 'string',
672
+ 'nullable': False,
673
+ 'metadata': {}
674
+ }
675
+ ]
676
+ }
677
+ ]
678
+
679
+ output_schema = [
680
+ {
681
+ 'id': '1',
682
+ 'type': 'struct',
683
+ 'fields': [
684
+ {
685
+ 'name': '<output return name>',
686
+ 'type': 'string',
687
+ 'nullable': False,
688
+ 'metadata': {}
689
+ }
690
+ ]
691
+ }
692
+ ]
693
+
694
+ sample_input = {
695
+ 'input_data': [
696
+ {
697
+ 'fields': ['<variable name 1>', '<variable name 2>'],
698
+ 'values': [
699
+ ['<sample input value for variable 1>', '<sample input value for variable 2>']
700
+ ]
701
+ }
702
+ ]
703
+ }
704
+
705
+
706
+ input_schema_editor = mo.ui.code_editor(value=json.dumps(input_schema, indent=4), language="python", min_height=25)
707
+ output_schema_editor = mo.ui.code_editor(value=json.dumps(output_schema, indent=4), language="python", min_height=25)
708
+ sample_input_editor = mo.ui.code_editor(value=json.dumps(sample_input, indent=4), language="python", min_height=25)
709
+
710
+ schema_editors = mo.accordion(
711
+ {
712
+ """**Input Schema Metadata Editor**""": input_schema_editor,
713
+ """**Output Schema Metadata Editor**""": output_schema_editor,
714
+ """**Sample Input Metadata Editor**""": sample_input_editor
715
+ }, multiple=True
716
+ )
717
+
718
+ # schema_editors
719
+ return (
720
+ input_schema,
721
+ input_schema_editor,
722
+ output_schema,
723
+ output_schema_editor,
724
+ sample_input,
725
+ sample_input_editor,
726
+ schema_editors,
727
+ )
728
+
729
+
730
+ @app.cell
731
+ def _(
732
+ ast,
733
+ deployment_client,
734
+ description_input,
735
+ function_editor,
736
+ input_schema_checkbox,
737
+ input_schema_editor,
738
+ json,
739
+ mo,
740
+ os,
741
+ output_schema_checkbox,
742
+ output_schema_editor,
743
+ sample_input_checkbox,
744
+ sample_input_editor,
745
+ selection_table,
746
+ software_spec,
747
+ tags_editor,
748
+ uploaded_function_name,
749
+ ):
750
+ get_upload_status, set_upload_status = mo.state("No uploads yet")
751
+
752
+ function_meta = {}
753
+
754
+ if selection_table.value['ID'].iloc[0] and deployment_client is not None:
755
+ # Start with the base required fields
756
+ function_meta = {
757
 + deployment_client.repository.FunctionMetaNames.NAME: uploaded_function_name.value or "your_function_name",
758
+ deployment_client.repository.FunctionMetaNames.SOFTWARE_SPEC_ID: software_spec or "45f12dfe-aa78-5b8d-9f38-0ee223c47309"
759
+ }
760
+
761
+ # Add optional fields if they exist
762
+ if tags_editor.value:
763
+ # Filter out empty strings from the tags list
764
+ filtered_tags = [tag for tag in tags_editor.value if tag and tag.strip()]
765
+ if filtered_tags: # Only add if there are non-empty tags
766
+ function_meta[deployment_client.repository.FunctionMetaNames.TAGS] = filtered_tags
767
+
768
+
769
+ if description_input.value:
770
+ function_meta[deployment_client.repository.FunctionMetaNames.DESCRIPTION] = description_input.value
771
+
772
+ # Add input schema if checkbox is checked
773
+ if input_schema_checkbox.value:
774
+ try:
775
+ function_meta[deployment_client.repository.FunctionMetaNames.INPUT_DATA_SCHEMAS] = json.loads(input_schema_editor.value)
776
+ except json.JSONDecodeError:
777
+ # If JSON parsing fails, try Python literal evaluation as fallback
778
+ function_meta[deployment_client.repository.FunctionMetaNames.INPUT_DATA_SCHEMAS] = ast.literal_eval(input_schema_editor.value)
779
+
780
+ # Add output schema if checkbox is checked
781
+ if output_schema_checkbox.value:
782
+ try:
783
+ function_meta[deployment_client.repository.FunctionMetaNames.OUTPUT_DATA_SCHEMAS] = json.loads(output_schema_editor.value)
784
+ except json.JSONDecodeError:
785
+ # If JSON parsing fails, try Python literal evaluation as fallback
786
+ function_meta[deployment_client.repository.FunctionMetaNames.OUTPUT_DATA_SCHEMAS] = ast.literal_eval(output_schema_editor.value)
787
+
788
+ # Add sample input if checkbox is checked
789
+ if sample_input_checkbox.value:
790
+ try:
791
+ function_meta[deployment_client.repository.FunctionMetaNames.SAMPLE_SCORING_INPUT] = json.loads(sample_input_editor.value)
792
+ except json.JSONDecodeError:
793
+ # If JSON parsing fails, try Python literal evaluation as fallback
794
+ function_meta[deployment_client.repository.FunctionMetaNames.SAMPLE_SCORING_INPUT] = ast.literal_eval(sample_input_editor.value)
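 + # For example (illustrative string, not one of the editor defaults): the text
 + # "{'id': '1', 'type': 'struct'}" is rejected by json.loads because of the single quotes,
 + # but ast.literal_eval parses it fine, which is what the fallbacks above rely on.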
795
+
796
+ def upload_function(function_meta, use_function_object=True):
797
+ """
798
+ Uploads a Python function to watsonx.ai as a deployable asset.
799
+ Parameters:
800
+ function_meta (dict): Metadata for the function
801
+ use_function_object (bool): Whether to use function object (True) or file path (False)
802
+ Returns:
803
+ dict: Details of the uploaded function
804
+ """
805
+ # Store the original working directory
806
+ original_dir = os.getcwd()
807
+
808
+ try:
809
+ # Create temp file from the code in the editor
810
+ code_to_deploy = function_editor.value['editor']
811
 + # Use the function name provided in the UI (falls back to the template default)
812
+ func_name = uploaded_function_name.value or "your_function_name"
813
+ # Ensure function_meta has the correct function name
814
+ function_meta[deployment_client.repository.FunctionMetaNames.NAME] = func_name
815
+ # Save the file locally first
816
+ save_dir = "/tmp/notebook_functions"
817
+ os.makedirs(save_dir, exist_ok=True)
818
+ file_path = f"{save_dir}/{func_name}.py"
819
+ with open(file_path, "w", encoding="utf-8") as f:
820
+ f.write(code_to_deploy)
821
+
822
+ if use_function_object:
823
+ # Import the function from the file
824
+ import sys
825
+ import importlib.util
826
+ # Add the directory to Python's path
827
+ sys.path.append(save_dir)
828
+ # Import the module
829
+ spec = importlib.util.spec_from_file_location(func_name, file_path)
830
+ module = importlib.util.module_from_spec(spec)
831
+ spec.loader.exec_module(module)
832
+ # Get the function object
833
+ function_object = getattr(module, func_name)
834
+
835
+ # Change to /tmp directory before calling IBM Watson SDK functions
836
+ os.chdir('/tmp')
837
+
838
+ # Upload the function object
839
+ mo.md(f"Uploading function object: {func_name}")
840
+ func_details = deployment_client.repository.store_function(function_object, function_meta)
841
+ else:
842
+ # Change to /tmp directory before calling IBM Watson SDK functions
843
+ os.chdir('/tmp')
844
+
845
+ # Upload using the file path approach
846
+ mo.md(f"Uploading function from file: {file_path}")
847
+ func_details = deployment_client.repository.store_function(file_path, function_meta)
848
+
849
+ set_upload_status(f"Latest Upload - id - {func_details['metadata']['id']}")
850
+ return func_details
851
+ except Exception as e:
852
+ set_upload_status(f"Error uploading function: {str(e)}")
853
+ mo.md(f"Detailed error: {str(e)}")
854
+ raise
855
+ finally:
856
+ # Always change back to the original directory, even if an exception occurs
857
+ os.chdir(original_dir)
858
+
859
+ upload_status = mo.state("No uploads yet")
860
+
861
+ upload_button = mo.ui.button(
862
+ label="Upload Function",
863
+ on_click=lambda _: upload_function(function_meta, use_function_object=True),
864
+ kind="success",
865
+ tooltip="Click to upload function to watsonx.ai"
866
+ )
867
+
868
+ # function_meta
869
+ return (
870
+ filtered_tags,
871
+ function_meta,
872
+ get_upload_status,
873
+ set_upload_status,
874
+ upload_button,
875
+ upload_function,
876
+ upload_status,
877
+ )
878
+
879
+
880
+ @app.cell
881
+ def _(get_upload_status, mo, upload_button):
882
+ # Upload your function
883
 + upload_result = None
 + artifact_id = None
 + if upload_button.value:
884
+ try:
885
+ upload_result = upload_button.value
886
+ artifact_id = upload_result['metadata']['id']
887
+ except Exception as e:
888
+ mo.md(f"Error: {str(e)}")
889
+
890
+ upload_func = mo.vstack([
891
+ upload_button,
892
+ mo.md(f"**Status:** {get_upload_status()}")
893
+ ], justify="space-around", align="center")
894
+ return artifact_id, upload_func, upload_result
895
+
896
+
897
+ @app.cell
898
+ def _(deployment_client, mo, pd, upload_button, uuid):
899
+ def reorder_hardware_specifications(df):
900
+ """
901
+ Reorders a hardware specifications dataframe by type and size of environment
902
+ without hardcoding specific hardware types.
903
+
904
+ Parameters:
905
+ df (pandas.DataFrame): The hardware specifications dataframe to reorder
906
+
907
+ Returns:
908
+ pandas.DataFrame: Reordered dataframe with reset index
909
+ """
910
+ # Create a copy to avoid modifying the original dataframe
911
+ result_df = df.copy()
912
+
913
+ # Define a function to extract the base type and size
914
+ def get_sort_key(name):
915
+ # Create a custom ordering list
916
+ custom_order = [
917
+ "XXS", "XS", "S", "M", "L", "XL",
918
+ "XS-Spark", "S-Spark", "M-Spark", "L-Spark", "XL-Spark",
919
+ "K80", "K80x2", "K80x4",
920
+ "V100", "V100x2",
921
+ "WXaaS-XS", "WXaaS-S", "WXaaS-M", "WXaaS-L", "WXaaS-XL",
922
+ "Default Spark", "Notebook Default Spark", "ML"
923
+ ]
924
+
925
+ # If name is in the custom order list, use its index
926
+ if name in custom_order:
927
+ return (0, custom_order.index(name))
928
+
929
+ # For any name not in the custom order, put it at the end
930
+ return (1, name)
931
+
932
+ # Add a temporary column for sorting
933
+ result_df['sort_key'] = result_df['NAME'].apply(get_sort_key)
934
+
935
+ # Sort the dataframe and drop the temporary column
936
+ result_df = result_df.sort_values('sort_key').drop('sort_key', axis=1)
937
+
938
+ # Reset the index
939
+ result_df = result_df.reset_index(drop=True)
940
+
941
+ return result_df
942
+
943
+ if deployment_client and upload_button.value:
944
+
945
+ hardware_specs = deployment_client.hardware_specifications.list()
946
+ hardware_specs_df = reorder_hardware_specifications(hardware_specs)
947
+
948
+ # Create a table with single-row selection
949
+ hw_selection_table = mo.ui.table(
950
+ hardware_specs_df,
951
+ selection="single", # Only allow selecting one row
952
+ label="#### **Select a supported hardware_specification for your deployment** *(Default: 'XS' - 1vCPU_4GB Ram)*",
953
+ initial_selection=[1],
954
+ page_size=6,
955
+ wrapped_columns=['DESCRIPTION']
956
+ )
957
+
958
+ deployment_type = mo.ui.radio(
959
+ options={"Function":"Online (Function Endpoint)","Runnable Job":"Batch (Runnable Jobs)"}, value="Function", label="Select the Type of Deployment:", inline=True
960
+ )
961
+ uuid_suffix = str(uuid.uuid4())[:4]
962
+
963
+ deployment_name = mo.ui.text(value=f"deployed_func_{uuid_suffix}", label="Deployment Name:", placeholder="<Must be completely unique>")
964
+ else:
965
+ hw_df = pd.DataFrame(
966
+ data=[["ID", "Activate deployment_client."]],
967
+ columns=["ID", "VALUE"]
968
+ )
969
+
970
+ hw_selection_table = mo.ui.table(
971
+ hw_df,
972
+ selection="single", # Only allow selecting one row
973
+ label="You haven't activated the Deployment_Client",
974
+ initial_selection=[0]
975
+ )
976
+
977
+
978
+ # mo.md(f"""
979
+ # <br>
980
+ # <br>
981
+ # {upload_func}
982
+ # <br>
983
+ # <br>
984
+ # ---
985
+ # {hw_selection_table}
986
+ # <br>
987
+ # <br>
988
+
989
+
990
+ # """)
991
+ return (
992
+ deployment_name,
993
+ deployment_type,
994
+ hardware_specs,
995
+ hardware_specs_df,
996
+ hw_df,
997
+ hw_selection_table,
998
+ reorder_hardware_specifications,
999
+ uuid_suffix,
1000
+ )
1001
+
1002
+
1003
+ @app.cell
1004
+ def _(
1005
+ artifact_id,
1006
+ deployment_client,
1007
1008
+ deployment_name,
1009
+ deployment_type,
1010
+ hw_selection_table,
1011
+ mo,
1012
+ print,
1013
+ upload_button,
1014
+ ):
1015
+ def deploy_function(artifact_id, deployment_type):
1016
+ """
1017
+ Deploys a function asset to watsonx.ai.
1018
+
1019
+ Parameters:
1020
+ artifact_id (str): ID of the function artifact to deploy
1021
+ deployment_type (object): Type of deployment (online or batch)
1022
+
1023
+ Returns:
1024
+ dict: Details of the deployed function
1025
+ """
1026
+ if not artifact_id:
1027
+ print("Error: No artifact ID provided. Please upload a function first.")
1028
+ return None
1029
+
1030
 + if deployment_type.value == "Online (Function Endpoint)": # value mapped from the "Function" option
1031
+ deployment_props = {
1032
+ deployment_client.deployments.ConfigurationMetaNames.NAME: deployment_name.value,
1033
+ deployment_client.deployments.ConfigurationMetaNames.ONLINE: {},
1034
+ deployment_client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {"id": selected_hw_config},
1035
+ deployment_client.deployments.ConfigurationMetaNames.SERVING_NAME: deployment_name.value,
1036
+ }
1037
+ else: # "Runnable Job" instead of "Batch (Runnable Jobs)"
1038
+ deployment_props = {
1039
+ deployment_client.deployments.ConfigurationMetaNames.NAME: deployment_name.value,
1040
+ deployment_client.deployments.ConfigurationMetaNames.BATCH: {},
1041
+ deployment_client.deployments.ConfigurationMetaNames.HARDWARE_SPEC: {"id": selected_hw_config},
1042
+ # batch does not use serving names
1043
+ }
1044
+
1045
+ try:
1046
+ print(deployment_props)
1047
+ # First, get the asset details to confirm it exists
1048
+ asset_details = deployment_client.repository.get_details(artifact_id)
1049
+ print(f"Asset found: {asset_details['metadata']['name']} with ID: {asset_details['metadata']['id']}")
1050
+
1051
+ # Create the deployment
1052
+ deployed_function = deployment_client.deployments.create(artifact_id, deployment_props)
1053
+ print(f"Creating deployment from Asset: {artifact_id} with deployment properties {str(deployment_props)}")
1054
+ return deployed_function
1055
+ except Exception as e:
1056
+ print(f"Deployment error: {str(e)}")
1057
+ return None
1058
+
1059
+ def get_deployment_id(deployed_function):
1060
 + deployment_id = deployment_client.deployments.get_uid(deployed_function)
1061
+ return deployment_id
1062
+
1063
+ def get_deployment_info(deployment_id):
1064
+ deployment_info = deployment_client.deployments.get_details(deployment_id)
1065
+ return deployment_info
1066
+
1067
+ deployment_status = mo.state("No deployments yet")
1068
+
1069
+ if hw_selection_table.value['ID'].iloc[0]:
1070
+ selected_hw_config = hw_selection_table.value['ID'].iloc[0]
1071
+
1072
+ deploy_button = mo.ui.button(
1073
+ label="Deploy Function",
1074
+ on_click=lambda _: deploy_function(artifact_id, deployment_type),
1075
+ kind="success",
1076
+ tooltip="Click to deploy function to watsonx.ai"
1077
+ )
1078
+
1079
+ if deployment_client and upload_button.value:
1080
+ deployment_definition = mo.hstack([
1081
+ deployment_type,
1082
+ deployment_name
1083
+ ], justify="space-around")
1084
+ else:
1085
+ deployment_definition = mo.hstack([
1086
+ "No Deployment Type Selected",
1087
+ "No Deployment Name Provided"
1088
+ ], justify="space-around")
1089
+
1090
+ # deployment_definition
1091
+ return (
1092
+ deploy_button,
1093
+ deploy_function,
1094
+ deployment_definition,
1095
+ deployment_status,
1096
+ get_deployment_id,
1097
+ get_deployment_info,
1098
+ selected_hw_config,
1099
+ )
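 + # A hedged sketch (not wired into the UI above) of how a successful Online deployment
 + # could be invoked afterwards, assuming the standard ibm_watsonx_ai scoring call and the
 + # same payload format as the function's sample input; names below are placeholders:
 + #
 + # deployed = deploy_button.value
 + # dep_id = deployment_client.deployments.get_uid(deployed)
 + # scoring_payload = {"input_data": [{"fields": ["message"], "values": [["Hello"]]}]}
 + # result = deployment_client.deployments.score(dep_id, scoring_payload)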
1100
+
1101
+
1102
+ @app.cell
1103
+ def _(deploy_button, deployment_definition, mo):
1104
+ _ = deployment_definition
1105
+
1106
+ deploy_fnc = mo.vstack([
1107
+ deploy_button,
1108
+ deploy_button.value
1109
+ ], justify="space-around", align="center")
1110
+
1111
+ # mo.md(f"""
1112
+ # {deployment_definition}
1113
+ # <br>
1114
+ # <br>
1115
+ # {deploy_fnc}
1116
+
1117
+ # ---
1118
+ # """)
1119
+ return (deploy_fnc,)
1120
+
1121
+
1122
+ @app.cell(hide_code=True)
1123
+ def _(deployment_client, mo):
1124
 + ### Functions to list assets, get their IDs as a list, and purge them
1125
+
1126
+ def get_deployment_list():
1127
+ deployment_df = deployment_client.deployments.list()
1128
+ return deployment_df
1129
+
1130
+ def get_deployment_ids(df):
1131
+ dep_list = df['ID'].tolist()
1132
+ return dep_list
1133
+
1134
+ def get_data_assets_list():
1135
+ data_assets_df = deployment_client.data_assets.list()
1136
+ return data_assets_df
1137
+
1138
+ def get_data_asset_ids(df):
1139
+ data_asset_list = df['ASSET_ID'].tolist()
1140
+ return data_asset_list
1141
+
1142
 + ### List repository assets (AI services, functions, models, etc.), get their IDs as a list, and purge them
1143
+ def get_repository_list():
1144
+ repository_df = deployment_client.repository.list()
1145
+ return repository_df
1146
+
1147
+ def get_repository_ids(df):
1148
+ repository_list = df['ID'].tolist()
1149
+ return repository_list
1150
+
1151
+ def delete_with_progress(ids_list, delete_function, item_type="items"):
1152
+ """
1153
+ Generic wrapper that adds a progress bar to any deletion function
1154
+
1155
+ Parameters:
1156
+ ids_list: List of IDs to delete
1157
+ delete_function: Function that deletes a single ID
1158
+ item_type: String describing what's being deleted (for display)
1159
+ """
1160
+ with mo.status.progress_bar(
1161
+ total=len(ids_list) or 1,
1162
+ title=f"Purging {item_type}",
1163
+ subtitle=f"Deleting {item_type}...",
1164
+ completion_title="Purge Complete",
1165
+ completion_subtitle=f"Successfully deleted {len(ids_list)} {item_type}"
1166
+ ) as progress:
1167
+ for item_id in ids_list:
1168
+ delete_function(item_id)
1169
+ progress.update(increment=1)
1170
+ return f"Deleted {len(ids_list)} {item_type} successfully"
1171
+
1172
+ # Use with existing deletion functions
1173
+ def delete_deployments(deployment_ids):
1174
+ return delete_with_progress(
1175
+ deployment_ids,
1176
+ lambda id: deployment_client.deployments.delete(id),
1177
+ "deployments"
1178
+ )
1179
+
1180
+ def delete_data_assets(data_asset_ids):
1181
+ return delete_with_progress(
1182
+ data_asset_ids,
1183
+ lambda id: deployment_client.data_assets.delete(id),
1184
+ "data assets"
1185
+ )
1186
+
1187
+ def delete_repository_items(repository_ids):
1188
+ return delete_with_progress(
1189
+ repository_ids,
1190
+ lambda id: deployment_client.repository.delete(id),
1191
+ "repository items"
1192
+ )
1193
+ return (
1194
+ delete_data_assets,
1195
+ delete_deployments,
1196
+ delete_repository_items,
1197
+ delete_with_progress,
1198
+ get_data_asset_ids,
1199
+ get_data_assets_list,
1200
+ get_deployment_ids,
1201
+ get_deployment_list,
1202
+ get_repository_ids,
1203
+ get_repository_list,
1204
+ )
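 + # A minimal sketch of the flow the purge buttons below drive, chaining the helpers above
 + # (illustrative only; in the notebook the three buttons perform these steps one by one):
 + #
 + # deployments_df = get_deployment_list()
 + # delete_deployments(get_deployment_ids(deployments_df))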
1205
+
1206
+
1207
+ @app.cell
1208
+ def _(get_deployment_id_list, get_deployments_button, mo, purge_deployments):
1209
+ deployments_purge_stack = mo.hstack([get_deployments_button, get_deployment_id_list, purge_deployments])
1210
+ deployments_purge_stack_results = mo.vstack([get_deployments_button.value, get_deployment_id_list.value, purge_deployments.value])
1211
+
1212
+ deployments_purge_tab = mo.vstack([deployments_purge_stack, deployments_purge_stack_results])
1213
+ return (
1214
+ deployments_purge_stack,
1215
+ deployments_purge_stack_results,
1216
+ deployments_purge_tab,
1217
+ )
1218
+
1219
+
1220
+ @app.cell
1221
+ def _(get_repository_button, get_repository_id_list, mo, purge_repository):
1222
+ repository_purge_stack = mo.hstack([get_repository_button, get_repository_id_list, purge_repository])
1223
+
1224
+ repository_purge_stack_results = mo.vstack([get_repository_button.value, get_repository_id_list.value, purge_repository.value])
1225
+
1226
+ repository_purge_tab = mo.vstack([repository_purge_stack, repository_purge_stack_results])
1227
+ return (
1228
+ repository_purge_stack,
1229
+ repository_purge_stack_results,
1230
+ repository_purge_tab,
1231
+ )
1232
+
1233
+
1234
+ @app.cell
1235
+ def _(get_data_asset_id_list, get_data_assets_button, mo, purge_data_assets):
1236
+ data_assets_purge_stack = mo.hstack([get_data_assets_button, get_data_asset_id_list, purge_data_assets])
1237
+ data_assets_purge_stack_results = mo.vstack([get_data_assets_button.value, get_data_asset_id_list.value, purge_data_assets.value])
1238
+
1239
+ data_assets_purge_tab = mo.vstack([data_assets_purge_stack, data_assets_purge_stack_results])
1240
+ return (
1241
+ data_assets_purge_stack,
1242
+ data_assets_purge_stack_results,
1243
+ data_assets_purge_tab,
1244
+ )
1245
+
1246
+
1247
+ @app.cell
1248
+ def _(data_assets_purge_tab, deployments_purge_tab, mo, repository_purge_tab):
1249
+ purge_tabs = mo.ui.tabs(
1250
+ {"Purge Deployments": deployments_purge_tab, "Purge Repository Assets": repository_purge_tab,"Purge Data Assets": data_assets_purge_tab }, lazy=False
1251
+ )
1252
+
1253
+ # asset_purge = mo.accordion(
1254
+ # {
1255
+ # """<br>
1256
+ # #### **Supporting Cleanup Functionality, lists of different assets and purge them if needed** *(purges all detected)*
1257
+ # <br>""": purge_tabs,
1258
+ # }
1259
+ # )
1260
+
1261
+ # asset_purge
1262
+ return (purge_tabs,)
1263
+
1264
+
1265
+ @app.cell(hide_code=True)
1266
+ def _(
1267
+ delete_data_assets,
1268
+ delete_deployments,
1269
+ delete_repository_items,
1270
+ get_data_asset_ids,
1271
+ get_data_assets_list,
1272
+ get_deployment_ids,
1273
+ get_deployment_list,
1274
+ get_repository_ids,
1275
+ get_repository_list,
1276
+ mo,
1277
+ ):
1278
+ ### Temporary Function Purge - Assets
1279
+ get_data_assets_button = mo.ui.button(
1280
+ label="Get Data Assets Dataframe",
1281
+ on_click=lambda _: get_data_assets_list(),
1282
+ kind="neutral",
1283
+ )
1284
+
1285
+ get_data_asset_id_list = mo.ui.button(
1286
+ label="Turn Dataframe into List of IDs",
1287
+ on_click=lambda _: get_data_asset_ids(get_data_assets_button.value),
1288
+ kind="neutral",
1289
+ )
1290
+
1291
+ purge_data_assets = mo.ui.button(
1292
+ label="Purge Data Assets",
1293
+ on_click=lambda _: delete_data_assets(get_data_asset_id_list.value),
1294
+ kind="danger",
1295
+ )
1296
+
1297
+ ### Temporary Function Purge - Deployments
1298
+ get_deployments_button = mo.ui.button(
1299
+ label="Get Deployments Dataframe",
1300
+ on_click=lambda _: get_deployment_list(),
1301
+ kind="neutral",
1302
+ )
1303
+
1304
+ get_deployment_id_list = mo.ui.button(
1305
+ label="Turn Dataframe into List of IDs",
1306
+ on_click=lambda _: get_deployment_ids(get_deployments_button.value),
1307
+ kind="neutral",
1308
+ )
1309
+
1310
+ purge_deployments = mo.ui.button(
1311
+ label="Purge Deployments",
1312
+ on_click=lambda _: delete_deployments(get_deployment_id_list.value),
1313
+ kind="danger",
1314
+ )
1315
+
1316
+ ### Repository Items Purge
1317
+ get_repository_button = mo.ui.button(
1318
+ label="Get Repository Dataframe",
1319
+ on_click=lambda _: get_repository_list(),
1320
+ kind="neutral",
1321
+ )
1322
+
1323
+ get_repository_id_list = mo.ui.button(
1324
+ label="Turn Dataframe into List of IDs",
1325
+ on_click=lambda _: get_repository_ids(get_repository_button.value),
1326
+ kind="neutral",
1327
+ )
1328
+
1329
+ purge_repository = mo.ui.button(
1330
+ label="Purge Repository Items",
1331
+ on_click=lambda _: delete_repository_items(get_repository_id_list.value),
1332
+ kind="danger",
1333
+ )
1334
+ return (
1335
+ get_data_asset_id_list,
1336
+ get_data_assets_button,
1337
+ get_deployment_id_list,
1338
+ get_deployments_button,
1339
+ get_repository_button,
1340
+ get_repository_id_list,
1341
+ purge_data_assets,
1342
+ purge_deployments,
1343
+ purge_repository,
1344
+ )
1345
+
1346
+
1347
+ if __name__ == "__main__":
1348
+ app.run()
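 + # Note: as a marimo notebook, this file would typically be opened interactively with
 + # `marimo edit app_v2_backup.py`, or served as a read-only app with
 + # `marimo run app_v2_backup.py` (both standard marimo CLI commands).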