from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str

# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("acrostic", "EM", "Acrostic")
    task1 = Task("crossword", "EM", "Crossword")
    task2 = Task("cryptogram", "EM", "Cryptogram")
    task3 = Task("logic_puzzle", "EM", "Logic Puzzle")
    task4 = Task("sudoku", "EM", "Sudoku")
    task5 = Task("drop_quote", "EM", "Drop Quote")

@dataclass
class Metric:
    short: str
    col_name: str


class Metrics(Enum):
    CR = Metric("CR", "Completion Rate")
    S_Acc = Metric("S-Acc", "Subtask Accuracy")
    EM = Metric("EM", "Exact Match")
    PM_05 = Metric("PM-0.5", "Partial Match (0.5)")
    Tokens = Metric("Tokens", "Tokens")
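
# Illustrative sketch (our addition, not part of the upstream template): one
# way the enums above could be consumed to derive leaderboard column headers,
# assuming one column per (task, metric) pair. The function name is hypothetical.
def leaderboard_columns() -> list[str]:
    return [
        f"{task.value.col_name} ({metric.value.short})"
        for task in Tasks
        for metric in Metrics
    ]
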
NUM_FEWSHOT = 0  # Change to match your few-shot setting
# ---------------------------------------------------

# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">LR<sup>2</sup>Bench: Evaluating Long-chain Reflective Reasoning Capabilities of Large Language Models via Constraint Satisfaction Problems</h1>"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
<strong>LR<sup>2</sup>Bench</strong> is a novel benchmark designed to evaluate the <strong>L</strong>ong-chain <strong>R</strong>eflective <strong>R</strong>easoning capabilities of LLMs. LR<sup>2</sup>Bench comprises 850 samples across six Constraint Satisfaction Problems (CSPs) where reflective reasoning is crucial for deriving solutions that meet all given constraints. Each task type focuses on distinct constraint patterns, such as knowledge-based, logical, and spatial constraints, providing a comprehensive evaluation of diverse problem-solving scenarios.

<strong>Note:</strong> We have released the LR<sup>2</sup>Bench dataset <a href="https://github.com/Ultramarine-spec/LR2Bench">here</a>. For evaluation, you can submit your model's answers here following the submission guidelines. The leaderboard will automatically evaluate performance with rule-based matching. If you have further questions, please feel free to contact us at <a href="mailto:[email protected]">[email protected]</a>.
"""
TASK_TEXT = {
    'Acrostic': r'The Acrostic task involves word clues like Crossword, but its objective is to form a hidden quotation or sentence from the answers to the clues. This requires that the answer words not only satisfy the corresponding clues but also fit together to construct the hidden message. We collected 50 easy and 50 hard Acrostic samples from <a href="https://www.printable-puzzles.com/printable-acrostic-puzzles.php" target="_blank">Printable Puzzles</a>, with timestamps ranging from September 2024 to December 2024.',
    'Crossword': r'The Crossword task requires inferring correct words from given clues and filling them into a grid. A key challenge lies in satisfying the constraint of shared letter intersections between horizontal and vertical words. We collected 150 Crossword samples published in 2024 from the <a href="https://www.latimes.com" target="_blank">Los Angeles Times</a> and <a href="https://www.vulture.com" target="_blank">Vulture</a> in three sizes: $5\times5$, $10\times10$, and $15\times15$, with 50 samples of each size.',
    'Logic_Puzzle': r'The Logic Puzzle task is a problem that requires logical reasoning to deduce relationships between a set of entities based on the given constraints and clues. The objective is to systematically analyze the given information, employing techniques such as hypothesis formation, elimination, and deductive inference, to determine a unique solution that satisfies all given constraints. We collected 50 puzzles for each of four sizes ($4\times4$, $4\times5$, $4\times6$, and $4\times7$) from <a href="https://www.printable-puzzles.com/printable-logic-puzzles.php" target="_blank">Printable Puzzles</a>, with timestamps ranging from September 2024 to December 2024.',
    'Cryptogram': r'The Cryptogram task involves the decryption of an encrypted quotation or sentence in which each letter of the original text has been substituted with another, resulting in apparently nonsensical text. Decryption requires identifying patterns, common letter frequencies, and word structures to deduce the letter-to-letter correspondences, ultimately reconstructing the original content. We collected 50 easy and 50 hard samples from <a href="https://www.printable-puzzles.com/printable-cryptograms.php" target="_blank">Printable Puzzles</a>, with timestamps ranging from September 2024 to December 2024.',
    'Sudoku': r'The Sudoku task consists of filling an $n^2 \times n^2$ grid with digits from 1 to $n^2$, subject to the constraint that each row, column, and $n \times n$ subgrid contains all digits from 1 to $n^2$ without repetition. Success in Sudoku relies on logical deduction and careful consideration of the existing digits to determine valid placements for the remaining numbers. From <a href="https://1sudoku.com" target="_blank">1sudoku</a>, we collected 200 Sudoku samples in total: 50 easy and 50 hard samples for each of the $4\times4$ and $9\times9$ sizes.',
    'Drop_Quote': r'The Drop Quote task comprises a grid of multiple rows and columns, with each column providing a set of candidate letters. The task requires determining the correct row for the letters in each column, effectively "dropping" each letter into its target place to reveal the hidden quotation. We created 50 easy samples by manually compiling common quotations, and collected 50 hard samples from <a href="https://www.printable-puzzles.com/printable-drop-quotes.php" target="_blank">Printable Puzzles</a>, with timestamps ranging from September 2024 to December 2024.'
}
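
# Illustrative sketch (our addition): the Sudoku constraint described above
# (each row, column, and n x n subgrid contains 1..n^2 exactly once) can be
# checked for a completed grid as follows. The function name is hypothetical.
def sudoku_is_valid(grid: list[list[int]], n: int) -> bool:
    size = n * n
    want = set(range(1, size + 1))
    rows_ok = all(set(row) == want for row in grid)
    cols_ok = all({grid[r][c] for r in range(size)} == want for c in range(size))
    boxes_ok = all(
        {grid[br + r][bc + c] for r in range(n) for c in range(n)} == want
        for br in range(0, size, n)
        for bc in range(0, size, n)
    )
    return rows_ok and cols_ok and boxes_ok
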
SUBMIT_TEMPLATE = """
Submit a single JSON file in the following format. In `config`, set `model_name` to your model's name, `link` to your model's page if available, `Params` to the number of parameters if available, and `show_on_leaderboard` to whether your model should appear on the leaderboard.
```json
{
    "config": {
        "model_name": "deepseek-ai/DeepSeek-R1",
        "link": "https://huggingface.co/deepseek-ai/DeepSeek-R1",
        "Params": 671,
        "show_on_leaderboard": true
    },
    "results": {
        "crossword": [
            {"tag": "TAG", "level": "LEVEL", "answer": "ANSWER"}
        ],
        "acrostic": [
            {"tag": "TAG", "level": "LEVEL", "answer": "ANSWER"}
        ],
        "logic": [
            {"tag": "TAG", "level": "LEVEL", "answer": "ANSWER"}
        ],
        "cryptogram": [
            {"tag": "TAG", "level": "LEVEL", "answer": "ANSWER"}
        ],
        "sudoku": [
            {"tag": "TAG", "level": "LEVEL", "answer": "ANSWER"}
        ],
        "drop": [
            {"tag": "TAG", "level": "LEVEL", "answer": "ANSWER"}
        ]
    }
}
```
"""
# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
## How it works

## Reproducibility
To reproduce our results, here are the commands you can run:
"""
EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer using AutoClasses:
```python
from transformers import AutoConfig, AutoModel, AutoTokenizer

revision = "main"  # or the specific branch/commit you want to submit
config = AutoConfig.from_pretrained("your model name", revision=revision)
model = AutoModel.from_pretrained("your model name", revision=revision)
tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.

Note: make sure your model is public!

Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it. Stay posted!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
It's a newer format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!

### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill out your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card.

## In case of model failure
If your model is displayed in the `FAILED` category, its execution stopped.
Make sure you have followed the above steps first.
If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the above command without modifications (you can add `--limit` to limit the number of examples per task).
"""
CITATION_BUTTON_LABEL = "📙 Citation"
CITATION_BUTTON_TEXT = r"""
@article{chen2025lr,
  title={LR$^2$Bench: Evaluating Long-chain Reflective Reasoning Capabilities of Large Language Models via Constraint Satisfaction Problems},
  author={Chen, Jianghao and Wei, Zhenlin and Ren, Zhenjiang and Li, Ziyong and Zhang, Jiajun},
  journal={arXiv preprint arXiv:2502.17848},
  year={2025}
}
""".strip()