apsys committed
Commit: 5a2e143 · Parent(s): 2daa8e2

Update about

Files changed (1)
  1. src/about.py +8 -2
src/about.py CHANGED
@@ -19,7 +19,13 @@ Models are evaluated on their ability to properly refuse harmful requests and de
 across multiple categories and test scenarios.
 """
 
-LLM_BENCHMARKS_TEXT = "CircleGuardBench is the first-of-its-kind benchmark for evaluating the protection capabilities of large language model (LLM) guard systems. It tests how well guard models block harmful content, resist jailbreaks, avoid false positives, and operate efficiently in real-time environments on a taxonomy close to real-world data."
+LLM_BENCHMARKS_TEXT = """
+CircleGuardBench is the first-of-its-kind benchmark for evaluating the protection capabilities of large language model (LLM) guard systems.
+
+It tests how well guard models block harmful content, resist jailbreaks, avoid false positives, and operate efficiently in real-time environments on a taxonomy close to real-world data.
+
+Learn more about us at whitecircle.ai
+"""
 
 
 EVALUATION_QUEUE_TEXT = """
@@ -39,7 +45,7 @@ CITATION_BUTTON_LABEL = "Cite CircleGuardBench"
 CITATION_BUTTON_TEXT = """
 @misc{circleguardbench2025,
 author = {whitecircle-ai},
-title = {CircleGuardBench: Comprehensive Benchmark for LLM Safety Guardrails},
+title = {CircleGuardBench: Comprehensive Benchmark for LLM Safety Guardrails. Learn more about us at whitecircle.ai},
 year = {2025},
 publisher = {GitHub},
 journal = {GitHub repository},
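
For context on what this change affects: the constants in src/about.py are display strings for the leaderboard UI. The sketch below is not part of this commit; it is a minimal, hypothetical example of how a leaderboard app.py typically renders these constants with Gradio (the Blocks/Tab/Accordion layout and the `demo` name are assumptions based on the common Hugging Face leaderboard template). It also illustrates why LLM_BENCHMARKS_TEXT was switched to a triple-quoted string: rendered through gr.Markdown, the blank lines become paragraph breaks.

```python
# Minimal sketch, NOT part of this commit: how a leaderboard app might render
# the constants from src/about.py. The layout below is an assumption based on
# the standard Hugging Face leaderboard template, not code from this repository.
import gradio as gr

from src.about import (
    LLM_BENCHMARKS_TEXT,
    EVALUATION_QUEUE_TEXT,
    CITATION_BUTTON_LABEL,
    CITATION_BUTTON_TEXT,
)

with gr.Blocks() as demo:
    # The multi-line LLM_BENCHMARKS_TEXT introduced by this commit is rendered
    # as Markdown, so its blank lines become paragraph breaks in the About tab.
    with gr.Tab("About"):
        gr.Markdown(LLM_BENCHMARKS_TEXT)
    with gr.Tab("Submit"):
        gr.Markdown(EVALUATION_QUEUE_TEXT)
    # The BibTeX entry, including the updated title, is exposed in a copyable
    # textbox under an accordion.
    with gr.Accordion(CITATION_BUTTON_LABEL, open=False):
        gr.Textbox(
            value=CITATION_BUTTON_TEXT,
            label=CITATION_BUTTON_LABEL,
            lines=8,
            show_copy_button=True,
        )

if __name__ == "__main__":
    demo.launch()
```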