import datasets
import evaluate
from sklearn.metrics import confusion_matrix

_DESCRIPTION = """
FPR (false positive rate) is the proportion of actual negative cases that are incorrectly classified as positive (i.e. the probability that false alerts will be raised). It is defined as:
FPR = FP / (FP + TN)
Where:
TN: True negative
FP: False positive
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth (correct) target values.
    normalize (`str`): Either `'true'`, `'pred'`, or `'all'`, normalizing the confusion matrix over the true (rows) conditions, the predicted (columns) conditions, or the whole population, respectively. If None (the default), the confusion matrix is not normalized.
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
    false_positive_rate (`float`): FPR score. Minimum possible value is 0. Maximum possible value is 1.0.
"""

_CITATION = """
@misc{enwiki:1178431122,
    author = "{Wikipedia contributors}",
    title = "False positives and false negatives --- {Wikipedia}{,} The Free Encyclopedia",
    year = "2023",
    url = "https://en.wikipedia.org/w/index.php?title=False_positives_and_false_negatives&oldid=1178431122",
    note = "[Online; accessed 17-November-2023]"
}
"""


class FPR(evaluate.Metric):
    def _info(self):
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # The multilabel config expects one sequence of labels per example.
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    # The default config expects a single label per example.
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html"],
        )

    def _compute(self, predictions, references, normalize=None, sample_weight=None):
        # confusion_matrix returns a 2x2 matrix for binary labels; ravel() unpacks it
        # in the order (tn, fp, fn, tp). This unpacking assumes exactly two classes.
        tn, fp, fn, tp = confusion_matrix(
            references, predictions, normalize=normalize, sample_weight=sample_weight
        ).ravel()
        fpr = fp / (fp + tn)
        return {"false_positive_rate": fpr}