"""
Text content for the CircleGuardBench Leaderboard.
"""
TITLE = """
<div style="text-align: center; margin-bottom: 1rem">
<h1>CircleGuardBench Leaderboard</h1>
</div>
"""
INTRODUCTION_TEXT = """
## Introduction
CircleGuardBench is a comprehensive benchmark for evaluating the protection capabilities of large language model (LLM) guard systems.
This leaderboard tracks model performance across safety categories such as harmful content detection and jailbreak resistance.
Models are evaluated on their ability to properly refuse harmful requests and detect problematic content
across multiple categories and test scenarios.
"""
LLM_BENCHMARKS_TEXT = """
CircleGuardBench is the first-of-its-kind benchmark for evaluating the protection capabilities of large language model (LLM) guard systems.
It tests how well guard models block harmful content, resist jailbreaks, avoid false positives, and operate efficiently in real-time environments, using a harm taxonomy that closely mirrors real-world data.
Learn more about us at [whitecircle.ai](https://whitecircle.ai).
"""
EVALUATION_QUEUE_TEXT = """
## Submit Your Model
To add your model to the CircleGuardBench leaderboard:
1. Run your evaluation with the CircleGuardBench framework: [whitecircle-ai/circle-guard-bench](https://github.com/whitecircle-ai/circle-guard-bench).
2. Upload your run results in `.jsonl` format using this form.
3. Once validated, your model will appear on the leaderboard.
### ✉️✨ Ready? Upload your results below!
"""
CITATION_BUTTON_LABEL = "Cite CircleGuardBench"
CITATION_BUTTON_TEXT = """
@misc{circleguardbench2025,
  author       = {whitecircle-ai},
  title        = {CircleGuardBench: Comprehensive Benchmark for LLM Safety Guardrails},
  year         = {2025},
  publisher    = {GitHub},
  journal      = {GitHub repository},
  howpublished = {\\url{https://github.com/whitecircle-ai/circle-guard-bench}}
}
"""