Miro Goettler committed
Commit 0ddc36e · 1 Parent(s): 1455a96

Add info holder

Files changed (1): app.py +22 -13
app.py CHANGED

@@ -13,8 +13,8 @@ import llm
 from card import card
 
 
-
-grey = "#f0f0f0"
+hint_color = "#fce08b"
+info_color = "#bafc8b"
 
 # init page
 st.set_page_config(
@@ -74,14 +74,14 @@ for idx, level in enumerate(config.LEVELS):
 btn_submit_prompt = st.button(
 "Send prompt", key=f"submit_prompt_{level}"
 )
-output= None
+output = None
 # Show response
 if len(txt) > 0 and btn_submit_prompt:
 st.session_state[f"prompt_try_count_{level}"] += 1
 with st.container(border=True):
 st.write("Response:")
 # special checks for certain levels
-
+
 if level == "llm_judge_input":
 invalid, output_raw = llm.run_judge(
 level, {"user_input": txt}
@@ -206,7 +206,7 @@ for idx, level in enumerate(config.LEVELS):
 icon="ℹ️",
 )
 
-hint_1_cont = card(color=grey)
+hint_1_cont = card(color=hint_color)
 hint1 = hint_1_cont.toggle(
 "Show hint 1 - **Description of security strategy**",
 key=f"hint1_checkbox_{level}",
@@ -221,7 +221,7 @@ for idx, level in enumerate(config.LEVELS):
 
 hint_1_cont.write(config.LEVEL_DESCRIPTIONS[level]["info"])
 
-hint_2_cont = card(color=grey)
+hint_2_cont = card(color=hint_color)
 hint2 = hint_2_cont.toggle(
 "Show hint 2 - **Backend code execution**",
 key=f"hint2_checkbox_{level}",
@@ -260,8 +260,8 @@ for idx, level in enumerate(config.LEVELS):
 hint_2_cont.code(val, language=None)
 hint_2_cont.write("The response of the LLM judge:")
 intermediate_output = st.session_state[
-f"intermediate_output_holder_{level}"
-]
+f"intermediate_output_holder_{level}"
+]
 if intermediate_output is None:
 hint_2_cont.warning("Please submit a prompt first.")
 
@@ -271,7 +271,7 @@ for idx, level in enumerate(config.LEVELS):
 hint_2_cont.write(
 f"The prompt was determined as **{'malicious' if invalid else 'not malicious'}** and therefor step 2 is executed."
 )
-
+
 hint_2_cont.write(
 "*Step 2:* If the user input is not classified as malicious, the prompt containing the actual secret is executed and the response is shown."
 )
@@ -331,8 +331,8 @@ for idx, level in enumerate(config.LEVELS):
 hint_2_cont.code(val, language=None)
 hint_2_cont.write("The response of the LLM judge:")
 intermediate_output = st.session_state[
-f"intermediate_output_holder_{level}"
-]
+f"intermediate_output_holder_{level}"
+]
 if intermediate_output is None:
 hint_2_cont.warning("Please submit a prompt first.")
 else:
@@ -417,7 +417,7 @@ for idx, level in enumerate(config.LEVELS):
 )
 show_base_prompt()
 
-hint_3_cont = card(color=grey)
+hint_3_cont = card(color=hint_color)
 
 hint3 = hint_3_cont.toggle(
 "Show hint 3 - **Prompt solution example**",
@@ -438,6 +438,15 @@ for idx, level in enumerate(config.LEVELS):
 language=None,
 )
 hint_3_cont.info("*May not allways work")
+
+info_cont = card(color=info_color)
+
+info_toogle = info_cont.toggle(
+"Show info",
+key=f"info_checkbox_{level}",
+)
+if info_toogle:
+info_cont.write("This is a demo to show the security levels of LLMs.")
 
 
 with st.expander("🏆 Record", expanded=True):
@@ -487,4 +496,4 @@ with st.expander("🏆 Record", expanded=True):
 # - story telling --> new field of study hard to be 100 percentage save
 # - switch to azure deployment --> currently not working under "GPT-4o"
 # - mark the user input with color in prompt
-# benefits and drawbacks, real world example
+# benefits and drawbacks, real world example
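For context, here is a minimal, self-contained sketch of the "info holder" pattern this commit introduces: a second colored card per level whose toggle reveals a short info text, alongside the existing hint cards. The card() helper below is only a stand-in built on st.container(border=True), since the project's real helper in card.py is not part of this diff and presumably also renders the background color; the color values and the info_checkbox_{level} key mirror the added lines above.

# Sketch only: a stand-alone Streamlit snippet, assuming the real card.card
# helper behaves like a bordered st.container that also applies the given color.
import streamlit as st

hint_color = "#fce08b"  # background for the hint cards (as added in this commit)
info_color = "#bafc8b"  # background for the new info card


def card(color: str = "#f0f0f0"):
    # Stand-in for card.card: a plain bordered container; the color argument
    # is accepted but not rendered here, unlike the project's own helper.
    return st.container(border=True)


LEVELS = ["llm_judge_input"]  # placeholder for config.LEVELS

for level in LEVELS:
    info_cont = card(color=info_color)
    # The toggle keeps the card collapsed by default and reveals the info text
    # only when switched on, mirroring the per-level hint cards.
    show_info = info_cont.toggle("Show info", key=f"info_checkbox_{level}")
    if show_info:
        info_cont.write("This is a demo to show the security levels of LLMs.")

Keeping the toggle and the revealed text on the same card container keeps the info visually grouped with that level's hints, which appears to be the point of the separate info_color.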