""" SACREBLEU metric. """ |
|
|
|
import datasets
import sacrebleu as scb
from packaging import version

import evaluate

_CITATION = """\
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.

See the [README.md] file at https://github.com/mjpost/sacreBLEU for more information.
"""

_KWARGS_DESCRIPTION = """
Produces the BLEU score, along with its sufficient statistics,
from a source against one or more references.

Args:
    predictions (`list` of `str`): list of translations to score. Each translation should be a plain, detokenized string; SacreBLEU applies its own tokenization.
    references (`list` of `list` of `str`): A list of lists of references. The contents of the first sub-list are the references for the first prediction, the contents of the second sub-list are for the second prediction, etc. Note that there must be the same number of references for each prediction (i.e. all sub-lists must be of the same length).
    smooth_method (`str`): The smoothing method to use, defaults to `'exp'`. Possible values are:
        - `'none'`: no smoothing
        - `'floor'`: increment zero counts
        - `'add-k'`: increment num/denom by k for n>1
        - `'exp'`: exponential decay
    smooth_value (`float`): The smoothing value. Only valid when `smooth_method='floor'` (in which case `smooth_value` defaults to `0.1`) or `smooth_method='add-k'` (in which case `smooth_value` defaults to `1`).
    tokenize (`str`): Tokenization method to use for BLEU. If not provided, defaults to `'zh'` for Chinese, `'ja-mecab'` for Japanese and `'13a'` (mteval) otherwise. Possible values are:
        - `'none'`: No tokenization.
        - `'zh'`: Chinese tokenization.
        - `'13a'`: mimics the `mteval-v13a` script from Moses.
        - `'intl'`: International tokenization, mimics the `mteval-v14` script from Moses.
        - `'char'`: Language-agnostic character-level tokenization.
        - `'ja-mecab'`: Japanese tokenization. Uses the [MeCab tokenizer](https://pypi.org/project/mecab-python3).
    lowercase (`bool`): If `True`, lowercases the input, enabling case-insensitivity. Defaults to `False`.
    force (`bool`): If `True`, insists that input which looks tokenized is in fact detokenized, suppressing sacrebleu's warning about it. Defaults to `False`.
    use_effective_order (`bool`): If `True`, stops including n-gram orders for which precision is 0. This should be `True` if sentence-level BLEU is computed. Defaults to `False`.

Returns:
    'score': BLEU score,
    'counts': Number of matching n-grams for each order (1-4),
    'totals': Total number of n-grams in the predictions for each order (1-4),
    'precisions': N-gram precisions (in percent) for each order (1-4),
    'bp': Brevity penalty,
    'sys_len': Total length of the predictions,
    'ref_len': Total (effective) length of the references,

Examples:

    Example 1:
        >>> predictions = ["hello there general kenobi", "foo bar foobar"]
        >>> references = [["hello there general kenobi", "hello there !"], ["foo bar foobar", "foo bar foobar"]]
        >>> sacrebleu = evaluate.load("sacrebleu")
        >>> results = sacrebleu.compute(predictions=predictions, references=references)
        >>> print(list(results.keys()))
        ['score', 'counts', 'totals', 'precisions', 'bp', 'sys_len', 'ref_len']
        >>> print(round(results["score"], 1))
        100.0

    Example 2:
        >>> predictions = ["hello there general kenobi",
        ...                "on our way to ankh morpork"]
        >>> references = [["hello there general kenobi", "hello there !"],
        ...               ["goodbye ankh morpork", "ankh morpork"]]
        >>> sacrebleu = evaluate.load("sacrebleu")
        >>> results = sacrebleu.compute(predictions=predictions,
        ...                             references=references)
        >>> print(list(results.keys()))
        ['score', 'counts', 'totals', 'precisions', 'bp', 'sys_len', 'ref_len']
        >>> print(round(results["score"], 1))
        39.8
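
    Example 3 (an illustrative sketch of case-insensitive scoring; the inputs below are assumptions chosen so that, once lowercased, each prediction matches a reference exactly):
        >>> predictions = ["Hello there General Kenobi", "Foo bar foobar"]
        >>> references = [["hello there general kenobi"], ["foo bar foobar"]]
        >>> sacrebleu = evaluate.load("sacrebleu")
        >>> results = sacrebleu.compute(predictions=predictions,
        ...                             references=references,
        ...                             lowercase=True)
        >>> print(round(results["score"], 1))
        100.0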
"""

@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Sacrebleu(evaluate.Metric):
    def _info(self):
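        # This metric requires sacrebleu>=1.4.12; fail early with an installation hint otherwise.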
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, but the installed version does not satisfy this requirement.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU",
            inputs_description=_KWARGS_DESCRIPTION,
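            # Two input schemas are accepted: several reference strings per prediction,
            # or a single reference string per prediction.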
            features=[
                datasets.Features(
                    {
                        "predictions": datasets.Value("string", id="sequence"),
                        "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                    }
                ),
                datasets.Features(
                    {
                        "predictions": datasets.Value("string", id="sequence"),
                        "references": datasets.Value("string", id="sequence"),
                    }
                ),
            ],
            codebase_urls=["https://github.com/mjpost/sacreBLEU"],
            reference_urls=[
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        smooth_method="exp",
        smooth_value=None,
        force=False,
        lowercase=False,
        tokenize=None,
        use_effective_order=False,
    ):
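        # If a single reference string was given per prediction, wrap it in a list
        # so that both accepted input schemas are handled uniformly below.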
        if isinstance(references[0], str):
            references = [[ref] for ref in references]

        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
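        # Transpose the references: scb.corpus_bleu expects one stream per reference
        # position, with each stream aligned to the list of predictions.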
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        output = scb.corpus_bleu(
            predictions,
            transformed_references,
            smooth_method=smooth_method,
            smooth_value=smooth_value,
            force=force,
            lowercase=lowercase,
            use_effective_order=use_effective_order,
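            # Forward `tokenize` only when it was explicitly set, so that sacrebleu can
            # otherwise fall back to its own language-dependent default tokenizer.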
            **(dict(tokenize=tokenize) if tokenize else {}),
        )
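        # Expose the fields of sacrebleu's BLEUScore result as a plain dictionary.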
        output_dict = {
            "score": output.score,
            "counts": output.counts,
            "totals": output.totals,
            "precisions": output.precisions,
            "bp": output.bp,
            "sys_len": output.sys_len,
            "ref_len": output.ref_len,
        }
        return output_dict