# Methods of the SQuAD v2 task class (the enclosing class definition and its
# imports are not shown in this extract).
def validation_docs(self):
    return self.dataset['validation']

def doc_to_text(self, doc):
    return 'Title: ' + doc['title'] + '\n\n' + 'Background: ' + doc['context'] + '\n\n' + 'Question: ' + doc['question'] + '\n\n' + 'Answer:'

def should_decontaminate(self):
    return True

def doc_to_decontamination_query(self, doc):
    return doc['context']

def doc_to_target(self, doc):
    answer_list = doc['answers']['text']
    if len(answer_list) > 0:
        answer = answer_list[0]
    else:
        answer = 'unanswerable'
    return ' ' + answer

def construct_requests(self, doc, ctx, **kwargs):
    # One greedy-generation request for the answer text and one loglikelihood
    # request for ' unanswerable', used to estimate the no-answer probability.
    return [
        Instance(request_type='generate_until', doc=doc, arguments=(ctx, {'until': ['\n']}), idx=0, **kwargs),
        Instance(request_type='loglikelihood', doc=doc, arguments=(ctx, ' ' + 'unanswerable'), idx=0, **kwargs),
    ]

def process_results(self, doc, results):
    continuation, (logprob_unanswerable, _) = results
    no_answer_probability = exp(logprob_unanswerable)
    predictions = {'id': doc['id'], 'prediction_text': continuation, 'no_answer_probability': no_answer_probability}
    references = {'id': doc['id'], 'answers': doc['answers']}
    return {'exact': (predictions, references), 'f1': (predictions, references), 'HasAns_exact': (predictions, references), 'HasAns_f1': (predictions, references), 'NoAns_exact': (predictions, references), 'NoAns_f1': (predictions, references), 'best_exact': (predictions, references), 'best_f1': (predictions, references)}

def aggregation(self):
    return {'exact': partial(_squad_agg, 'exact'), 'f1': partial(_squad_agg, 'f1'), 'HasAns_exact': partial(_squad_agg, 'HasAns_exact'), 'HasAns_f1': partial(_squad_agg, 'HasAns_f1'), 'NoAns_exact': partial(_squad_agg, 'NoAns_exact'), 'NoAns_f1': partial(_squad_agg, 'NoAns_f1'), 'best_exact': partial(_squad_agg, 'best_exact'), 'best_f1': partial(_squad_agg, 'best_f1')}

def higher_is_better(self):
    return {'exact': True, 'f1': True, 'HasAns_exact': True, 'HasAns_f1': True, 'NoAns_exact': True, 'NoAns_f1': True, 'best_exact': True, 'best_f1': True}
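
# A minimal usage sketch (not part of the task file): the two Instances above
# come back in order as (continuation, (logprob, is_greedy)); the toy values
# below are hypothetical and only illustrate how process_results turns the
# ' unanswerable' loglikelihood into a no-answer probability.
from math import exp

toy_results = ('Denver Broncos', (-2.3, False))
continuation, (logprob_unanswerable, _) = toy_results
print(continuation, exp(logprob_unanswerable))  # ~0.10 probability of 'unanswerable'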
# File: lm-evaluation-harness-main/lm_eval/tasks/super_glue/cb/aggregate.py
import numpy as np
import sklearn.metrics

def cb_multi_fi(items):
    # Unweighted mean of the per-class binary F1 scores over the three CB labels.
    preds, golds = zip(*items)
    preds = np.array(preds)
    golds = np.array(golds)
    f11 = sklearn.metrics.f1_score(y_true=golds == 0, y_pred=preds == 0)
    f12 = sklearn.metrics.f1_score(y_true=golds == 1, y_pred=preds == 1)
    f13 = sklearn.metrics.f1_score(y_true=golds == 2, y_pred=preds == 2)
    avg_f1 = np.mean([f11, f12, f13])
    return avg_f1
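
# A minimal usage sketch with toy label pairs (not part of the aggregate file):
# cb_multi_fi consumes (prediction, gold) index pairs over the three CB classes
# and returns the unweighted mean of the per-class binary F1 scores.
toy_items = [(0, 0), (1, 1), (2, 2), (0, 1), (1, 1)]
print(cb_multi_fi(toy_items))  # ~0.822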
# File: lm-evaluation-harness-main/lm_eval/tasks/super_glue/cb/t5_utils.py
import sklearn.metrics

def mean_3class_f1(predictions, references):
    # Map the (prediction, reference) strings to class indices; predictions
    # outside the label set fall back to class 0.
    string_label = ['entailment', 'contradiction', 'neutral']
    predictions = string_label.index(predictions[0]) if predictions[0] in string_label else 0
    references = string_label.index(references[0])
    return (predictions, references)

def agg_mean_3class_f1(items):
    predictions, references = zip(*items)
    # Macro F-beta (beta=1) over the three classes, computed with sklearn.
    metric_str = 'fbeta_score'
    metric_fn_kwargs = {'beta': 1, 'labels': range(3), 'average': 'macro'}

    def _fn(predictions, references):
        metric_fn = getattr(sklearn.metrics, metric_str)
        metric_val = metric_fn(references, predictions, **metric_fn_kwargs)
        return metric_val

    return _fn(predictions, references)
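
# A minimal usage sketch with toy string pairs (not part of the t5_utils file):
# each mean_3class_f1 call maps one (prediction, reference) pair to label
# indices, and agg_mean_3class_f1 scores all collected pairs with macro F1.
toy_pairs = [
    mean_3class_f1(['entailment'], ['entailment']),
    mean_3class_f1(['contradiction'], ['contradiction']),
    mean_3class_f1(['neutral'], ['neutral']),
    mean_3class_f1(['entailment'], ['neutral']),
]
print(agg_mean_3class_f1(toy_pairs))  # ~0.778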
# File: lm-evaluation-harness-main/lm_eval/tasks/super_glue/copa/utils.py
def convert_choice(choice):
    # Lowercase the first character so the choice reads as a sentence continuation.
    return choice[0].lower() + choice[1:]

def doc_to_text(doc):
    # Drop the premise's trailing punctuation and append the causal connector.
    connector = {'cause': 'because', 'effect': 'therefore'}[doc['question']]
    return doc['premise'].strip()[:-1] + f' {connector}'

def doc_to_target(doc):
    correct_choice = doc['choice1'] if doc['label'] == 0 else doc['choice2']
    return ' ' + convert_choice(correct_choice)

def doc_to_choice(doc):
    return [' ' + convert_choice(doc['choice1']), ' ' + convert_choice(doc['choice2'])]
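
# A minimal usage sketch with a toy COPA-style document (not part of the file):
toy_doc = {
    'premise': 'The man broke his toe.',
    'question': 'cause',
    'choice1': 'He got a hole in his sock.',
    'choice2': 'He dropped a hammer on his foot.',
    'label': 1,
}
print(doc_to_text(toy_doc))    # 'The man broke his toe because'
print(doc_to_choice(toy_doc))  # [' he got a hole in his sock.', ' he dropped a hammer on his foot.']
print(doc_to_target(toy_doc))  # ' he dropped a hammer on his foot.'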
# File: lm-evaluation-harness-main/lm_eval/tasks/super_glue/multirc/t5_utils.py
import collections

import numpy as np
import sklearn.metrics

def f1(predictions, references):
    # The gold label is read from the text after the reference string's final
    # underscore; a prediction outside the label set is scored as the wrong label.
    _prediction = predictions[0]
    _reference = references[0].split('_')[-1]
    string_label = ['False', 'True']
    reference = string_label.index(_reference)
    prediction = string_label.index(_prediction) if _prediction in string_label else not bool(reference)
    return (prediction, reference)
def agg_f1(items):
    # The body is truncated in this extract; a minimal reconstruction following
    # the pattern of agg_mean_3class_f1 above: collect the (prediction, reference)
    # pairs and score them with binary F1.
    predictions, references = zip(*items)
    references, predictions = np.asarray(references), np.asarray(predictions)
    return sklearn.metrics.f1_score(references, predictions)
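
# A minimal usage sketch with toy items (not part of the file): f1 maps each
# (prediction, reference) string pair to binary labels, and agg_f1 scores the
# collected pairs with binary F1. The reference strings below are hypothetical.
toy_items = [
    f1(['True'], ['q0_True']),
    f1(['False'], ['q1_False']),
    f1(['True'], ['q2_False']),
]
print(agg_f1(toy_items))  # ~0.667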