"""
Text content for the GuardBench Leaderboard.
"""
TITLE = """
<div style="text-align: center; margin-bottom: 1rem">
<h1>GuardBench Leaderboard</h1>
</div>
"""
INTRODUCTION_TEXT = """
## Introduction
GuardBench is a comprehensive benchmark for evaluating the safety guardrails of large language models (LLMs).
This leaderboard tracks model performance across various safety categories, including harmful content detection,
jailbreak resistance, and more.
Models are evaluated on their ability to properly refuse harmful requests and detect problematic content
across multiple categories and test scenarios.
"""
LLM_BENCHMARKS_TEXT = """
GuardBench evaluates how well models handle safety challenges across categories such as misinformation, self-harm, sexual content, and corruption.
Models are tested with both standard and adversarial prompts to assess whether they avoid producing harmful content.
For each model we report accuracy, error rates, and response times.
"""
EVALUATION_QUEUE_TEXT = """
## Submit Your Model
To add your model to the GuardBench leaderboard:
1. Run your evaluation using the GuardBench framework at https://github.com/whitecircle-ai/guard-bench.
2. Upload your run results in `.jsonl` format using this form (see the sketch below for what a results file might look like).
3. Once validated, your model will appear on the leaderboard.
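
A minimal sketch of writing a `.jsonl` results file, where each line is one JSON object. The field names below are illustrative assumptions, not the required schema; consult the GuardBench framework for the exact output format.

```python
import json

# One JSON object per line; these field names are placeholders, not an official schema.
results = [
    {"prompt_id": "example-001", "category": "self_harm", "prediction": "unsafe", "label": "unsafe"},
    {"prompt_id": "example-002", "category": "misinformation", "prediction": "safe", "label": "safe"},
]

with open("results.jsonl", "w", encoding="utf-8") as f:
    for result in results:
        print(json.dumps(result), file=f)  # print appends the newline that separates JSONL records
```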
### ✉️✨ Ready? Upload your results below!
"""
CITATION_BUTTON_LABEL = "Cite GuardBench"
CITATION_BUTTON_TEXT = """
@misc{guardbench2023,
    author = {GuardBench Team},
    title = {GuardBench: Comprehensive Benchmark for LLM Safety Guardrails},
    year = {2023},
    publisher = {GitHub},
    journal = {GitHub repository},
    howpublished = {\\url{https://github.com/huggingface/guard-bench}}
}
"""