apsys committed
Commit 3bbc6ce · 1 Parent(s): f955e58
Files changed (1)
src/about.py +3 -3
src/about.py CHANGED
@@ -19,9 +19,9 @@ Models are evaluated on their ability to properly refuse harmful requests and de
 across multiple categories and test scenarios.
 """

-LLM_BENCHMARKS_TEXT = "GuardBench checks how well models handle safety challenges — from misinformation and self-harm to sexual content and corruption. "+\
-"Models are tested with regular and adversarial prompts to see if they can avoid saying harmful things. "+\
-"We track how accurate they are, how often they make mistakes, and how fast they respond."
+LLM_BENCHMARKS_TEXT = "GuardBench checks how well models handle safety challenges — from misinformation and self-harm to sexual content and corruption.\n"+\
+"Models are tested with regular and adversarial prompts to see if they can avoid saying harmful things.\n"+\
+"We track how accurate they are, how often they make mistakes, and how fast they respond.\n"


 EVALUATION_QUEUE_TEXT = """
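
The edit appends "\n" to each concatenated segment, so LLM_BENCHMARKS_TEXT renders as three separate lines instead of one run-on paragraph. A minimal sketch of the before and after values, using only the strings from the diff (the old_text/new_text names and assert checks are illustrative, not part of the repo):

# Old value: segments end with a space, so the result is one long line
old_text = (
    "GuardBench checks how well models handle safety challenges — from misinformation and self-harm to sexual content and corruption. "
    "Models are tested with regular and adversarial prompts to see if they can avoid saying harmful things. "
    "We track how accurate they are, how often they make mistakes, and how fast they respond."
)

# New value: each segment ends with "\n", so the result renders as three lines
new_text = (
    "GuardBench checks how well models handle safety challenges — from misinformation and self-harm to sexual content and corruption.\n"
    "Models are tested with regular and adversarial prompts to see if they can avoid saying harmful things.\n"
    "We track how accurate they are, how often they make mistakes, and how fast they respond.\n"
)

assert old_text.count("\n") == 0  # single run-on line
assert new_text.count("\n") == 3  # one break after each sentence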