index | package | name | docstring | code | signature
---|---|---|---|---|---
4,529 | optbinning.binning.binning_process | _transform | null | def _transform(self, X, metric, metric_special, metric_missing,
show_digits, check_input):
# Check X dtype
if not isinstance(X, (pd.DataFrame, np.ndarray)):
raise TypeError("X must be a pandas.DataFrame or numpy.ndarray.")
n_samples, n_variables = X.shape
mask = self.get_support()
if not mask.any():
warn("No variables were selected: either the data is"
" too noisy or the selection_criteria too strict.",
UserWarning)
return np.empty(0).reshape((n_samples, 0))
if isinstance(X, np.ndarray) and len(mask) != n_variables:
raise ValueError("X has a different shape that during fitting.")
if isinstance(X, pd.DataFrame):
selected_variables = self.get_support(names=True)
for name in selected_variables:
if name not in X.columns:
raise ValueError("Selected variable {} must be a column "
"in the input dataframe.".format(name))
# Check metric
if metric in ("indices", "bins"):
if any(isinstance(optb, _OPTBPW_TYPES)
for optb in self._binned_variables.values()):
raise TypeError("metric {} not supported for piecewise "
"optimal binning objects.".format(metric))
indices_selected_variables = self.get_support(indices=True)
n_selected_variables = len(indices_selected_variables)
# Check if specific binning transform metrics were supplied, and
# whether these are compatible. Default base metric is the binning
# process transform metric.
base_metric = metric
if self.binning_transform_params is not None:
metrics = set()
if metric is not None:
metrics.add(metric)
for idx in indices_selected_variables:
name = self.variable_names[idx]
params = self.binning_transform_params.get(name, {})
metrics.add(params.get("metric", metric))
if len(metrics) > 1:
# indices and default transform metrics are numeric. If bins
# metrics is present the dtypes are incompatible.
if "bins" in metrics:
raise ValueError(
"metric 'bins' cannot be mixed with numeric metrics.")
else:
base_metric = metrics.pop()
if base_metric == "indices":
X_transform = np.full(
(n_samples, n_selected_variables), -1, dtype=int)
elif base_metric == "bins":
X_transform = np.full(
(n_samples, n_selected_variables), "", dtype=object)
else:
X_transform = np.zeros((n_samples, n_selected_variables))
for i, idx in enumerate(indices_selected_variables):
name = self.variable_names[idx]
optb = self._binned_variables[name]
if isinstance(X, np.ndarray):
x = X[:, idx]
else:
x = X[name]
params = {}
if self.binning_transform_params is not None:
params = self.binning_transform_params.get(name, {})
metric = params.get("metric", metric)
metric_missing = params.get("metric_missing", metric_missing)
metric_special = params.get("metric_special", metric_special)
tparams = {
"x": x,
"metric": metric,
"metric_special": metric_special,
"metric_missing": metric_missing,
"check_input": check_input,
"show_digits": show_digits
}
if isinstance(optb, _OPTBPW_TYPES):
tparams.pop("show_digits")
if metric is None:
tparams.pop("metric")
X_transform[:, i] = optb.transform(**tparams)
if isinstance(X, pd.DataFrame):
return pd.DataFrame(
X_transform, columns=selected_variables, index=X.index)
return X_transform
| (self, X, metric, metric_special, metric_missing, show_digits, check_input) |
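A minimal end-to-end sketch of this transform path through the public ``transform`` API. The data, variable names, and parameter choices below are hypothetical; it assumes ``optbinning`` is installed.

import numpy as np
import pandas as pd
from optbinning import BinningProcess

# Hypothetical binary-target data.
rng = np.random.default_rng(0)
X = pd.DataFrame({"age": rng.integers(18, 80, 500),
                  "income": rng.normal(50000.0, 15000.0, 500)})
y = rng.integers(0, 2, 500)

# Per-variable transform metrics: "indices" and "event_rate" are both
# numeric, so they can be mixed; mixing "bins" with a numeric metric
# raises ValueError, as enforced in _transform above.
bproc = BinningProcess(
    variable_names=["age", "income"],
    binning_transform_params={"age": {"metric": "indices"}})
bproc.fit(X, y)
X_t = bproc.transform(X, metric="event_rate")  # DataFrame in, DataFrame out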
4,530 | optbinning.binning.binning_process | _transform_disk | null | def _transform_disk(self, input_path, output_path, chunksize, metric,
metric_special, metric_missing, show_digits, **kwargs):
# check input_path and output_path extensions
input_extension = input_path.split(".")[-1]
output_extension = output_path.split(".")[-1]
if input_extension != "csv" or output_extension != "csv":
raise ValueError("input_path and output_path must be csv files.")
# check chunksize
if not isinstance(chunksize, numbers.Integral) or chunksize <= 0:
raise ValueError("chunksize must be a positive integer; got {}."
.format(chunksize))
# Check metric
if metric in ("indices", "bins"):
if any(isinstance(optb, _OPTBPW_TYPES)
for optb in self._binned_variables.values()):
raise TypeError("metric {} not supported for piecewise "
"optimal binning objects.".format(metric))
selected_variables = self.get_support(names=True)
n_selected_variables = len(selected_variables)
# Check if specific binning transform metrics were supplied, and
# whether these are compatible. Default base metric is the binning
# process transform metric.
base_metric = metric
if self.binning_transform_params is not None:
metrics = set()
if metric is not None:
metrics.add(metric)
for name in selected_variables:
params = self.binning_transform_params.get(name, {})
metrics.add(params.get("metric", metric))
if len(metrics) > 1:
# indices and default transform metrics are numeric. If bins
# metrics is present the dtypes are incompatible.
if "bins" in metrics:
raise ValueError(
"metric 'bins' cannot be mixed with numeric metrics.")
else:
base_metric = metrics.pop()
chunks = pd.read_csv(input_path, engine='c', chunksize=chunksize,
usecols=selected_variables, **kwargs)
for k, chunk in enumerate(chunks):
n_samples, n_variables = chunk.shape
if base_metric == "indices":
X_transform = np.full(
(n_samples, n_selected_variables), -1, dtype=int)
elif base_metric == "bins":
X_transform = np.full(
(n_samples, n_selected_variables), "", dtype=object)
else:
X_transform = np.zeros((n_samples, n_selected_variables))
for i, name in enumerate(selected_variables):
optb = self._binned_variables[name]
params = {}
if self.binning_transform_params is not None:
params = self.binning_transform_params.get(name, {})
metric = params.get("metric", metric)
metric_missing = params.get("metric_missing", metric_missing)
metric_special = params.get("metric_special", metric_special)
tparams = {
"x": chunk[name],
"metric": metric,
"metric_special": metric_special,
"metric_missing": metric_missing,
"show_digits": show_digits
}
if isinstance(optb, _OPTBPW_TYPES):
tparams.pop("show_digits")
if metric is None:
tparams.pop("metric")
X_transform[:, i] = optb.transform(**tparams)
df = pd.DataFrame(X_transform, columns=selected_variables)
df.to_csv(output_path, mode='a', index=False, header=(k == 0))
return self
| (self, input_path, output_path, chunksize, metric, metric_special, metric_missing, show_digits, **kwargs) |
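A sketch of the chunked flow above via the public ``transform_disk`` wrapper. The file names are placeholders and ``bproc`` is assumed to be a fitted BinningProcess, as in the earlier sketch.

# Reads only the selected variables from data.csv, 100000 rows at a
# time, transforms each chunk, and appends it to data_woe.csv
# (the header is written with the first chunk only).
bproc.transform_disk(input_path="data.csv", output_path="data_woe.csv",
                     chunksize=100000, metric="woe")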
4,531 | sklearn.base | _validate_data | Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape (n_samples, n_features), default='no_validation'
The input samples.
If `'no_validation'`, no validation is performed on `X`. This is
useful for meta-estimator which can delegate input validation to
their underlying estimator(s). In that case `y` must be passed and
the only accepted `check_params` are `multi_output` and
`y_numeric`.
y : array-like of shape (n_samples,), default='no_validation'
The targets.
- If `None`, `check_array` is called on `X`. If the estimator's
requires_y tag is True, then an error will be raised.
- If `'no_validation'`, `check_array` is called on `X` and the
estimator's requires_y tag is ignored. This is a default
placeholder and is never meant to be explicitly set. In that case
`X` must be passed.
- Otherwise, only `y` with `_check_y` or both `X` and `y` are
checked with either `check_array` or `check_X_y` depending on
`validate_separately`.
reset : bool, default=True
Whether to reset the `n_features_in_` attribute.
If False, the input will be checked for consistency with data
provided when reset was last True.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
validate_separately : False or tuple of dicts, default=False
Only used if y is not None.
If False, call check_X_y(). Else, it must be a tuple of kwargs
to be used for calling check_array() on X and y respectively.
`estimator=self` is automatically added to these dicts to generate
more informative error message in case of invalid input data.
cast_to_ndarray : bool, default=True
Cast `X` and `y` to ndarray with checks in `check_params`. If
`False`, `X` and `y` are unchanged and only `feature_names_in_` and
`n_features_in_` are checked.
**check_params : kwargs
Parameters passed to :func:`sklearn.utils.check_array` or
:func:`sklearn.utils.check_X_y`. Ignored if validate_separately
is not False.
`estimator=self` is automatically added to these params to generate
more informative error message in case of invalid input data.
Returns
-------
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if both `X` and `y` are
validated.
| def _validate_data(
self,
X="no_validation",
y="no_validation",
reset=True,
validate_separately=False,
cast_to_ndarray=True,
**check_params,
):
"""Validate input data and set or check the `n_features_in_` attribute.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features), default='no_validation'
The input samples.
If `'no_validation'`, no validation is performed on `X`. This is
useful for meta-estimator which can delegate input validation to
their underlying estimator(s). In that case `y` must be passed and
the only accepted `check_params` are `multi_output` and
`y_numeric`.
y : array-like of shape (n_samples,), default='no_validation'
The targets.
- If `None`, `check_array` is called on `X`. If the estimator's
requires_y tag is True, then an error will be raised.
- If `'no_validation'`, `check_array` is called on `X` and the
estimator's requires_y tag is ignored. This is a default
placeholder and is never meant to be explicitly set. In that case
`X` must be passed.
- Otherwise, only `y` with `_check_y` or both `X` and `y` are
checked with either `check_array` or `check_X_y` depending on
`validate_separately`.
reset : bool, default=True
Whether to reset the `n_features_in_` attribute.
If False, the input will be checked for consistency with data
provided when reset was last True.
.. note::
It is recommended to call reset=True in `fit` and in the first
call to `partial_fit`. All other methods that validate `X`
should set `reset=False`.
validate_separately : False or tuple of dicts, default=False
Only used if y is not None.
If False, call check_X_y(). Else, it must be a tuple of kwargs
to be used for calling check_array() on X and y respectively.
`estimator=self` is automatically added to these dicts to generate
more informative error message in case of invalid input data.
cast_to_ndarray : bool, default=True
Cast `X` and `y` to ndarray with checks in `check_params`. If
`False`, `X` and `y` are unchanged and only `feature_names_in_` and
`n_features_in_` are checked.
**check_params : kwargs
Parameters passed to :func:`sklearn.utils.check_array` or
:func:`sklearn.utils.check_X_y`. Ignored if validate_separately
is not False.
`estimator=self` is automatically added to these params to generate
more informative error message in case of invalid input data.
Returns
-------
out : {ndarray, sparse matrix} or tuple of these
The validated input. A tuple is returned if both `X` and `y` are
validated.
"""
self._check_feature_names(X, reset=reset)
if y is None and self._get_tags()["requires_y"]:
raise ValueError(
f"This {self.__class__.__name__} estimator "
"requires y to be passed, but the target y is None."
)
no_val_X = isinstance(X, str) and X == "no_validation"
no_val_y = y is None or isinstance(y, str) and y == "no_validation"
if no_val_X and no_val_y:
raise ValueError("Validation should be done on X, y or both.")
default_check_params = {"estimator": self}
check_params = {**default_check_params, **check_params}
if not cast_to_ndarray:
if not no_val_X and no_val_y:
out = X
elif no_val_X and not no_val_y:
out = y
else:
out = X, y
elif not no_val_X and no_val_y:
out = check_array(X, input_name="X", **check_params)
elif no_val_X and not no_val_y:
out = _check_y(y, **check_params)
else:
if validate_separately:
# We need this because some estimators validate X and y
# separately, and in general, separately calling check_array()
# on X and y isn't equivalent to just calling check_X_y()
# :(
check_X_params, check_y_params = validate_separately
if "estimator" not in check_X_params:
check_X_params = {**default_check_params, **check_X_params}
X = check_array(X, input_name="X", **check_X_params)
if "estimator" not in check_y_params:
check_y_params = {**default_check_params, **check_y_params}
y = check_array(y, input_name="y", **check_y_params)
else:
X, y = check_X_y(X, y, **check_params)
out = X, y
if not no_val_X and check_params.get("ensure_2d", True):
self._check_n_features(X, reset=reset)
return out
| (self, X='no_validation', y='no_validation', reset=True, validate_separately=False, cast_to_ndarray=True, **check_params) |
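A sketch of the typical calling convention, assuming a scikit-learn version in which ``_validate_data`` is still a ``BaseEstimator`` method: ``reset=True`` in ``fit`` to record ``n_features_in_``, ``reset=False`` elsewhere to check against it. The estimator itself is hypothetical.

import numpy as np
from sklearn.base import BaseEstimator

class MeanPredictor(BaseEstimator):
    def fit(self, X, y):
        # reset=True records n_features_in_ (and feature_names_in_
        # when X is a DataFrame).
        X, y = self._validate_data(X, y, reset=True)
        self.mean_ = np.mean(y)
        return self

    def predict(self, X):
        # reset=False raises if X has a different number of features
        # than seen during fit.
        X = self._validate_data(X, reset=False)
        return np.full(X.shape[0], self.mean_)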
4,532 | sklearn.base | _validate_params | Validate types and values of constructor parameters
The expected type and values must be defined in the `_parameter_constraints`
class attribute, which is a dictionary `param_name: list of constraints`. See
the docstring of `validate_parameter_constraints` for a description of the
accepted constraints.
| def _validate_params(self):
"""Validate types and values of constructor parameters
The expected type and values must be defined in the `_parameter_constraints`
class attribute, which is a dictionary `param_name: list of constraints`. See
the docstring of `validate_parameter_constraints` for a description of the
accepted constraints.
"""
validate_parameter_constraints(
self._parameter_constraints,
self.get_params(deep=False),
caller_name=self.__class__.__name__,
)
| (self) |
4,533 | optbinning.binning.binning_process | fit | Fit the binning process. Fit the optimal binning to all variables
according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
.. versionchanged:: 0.4.0
X supports ``numpy.ndarray`` and ``pandas.DataFrame``.
y : array-like of shape (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``. This option is only
available for a binary target.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BinningProcess
Fitted binning process.
| def fit(self, X, y, sample_weight=None, check_input=False):
"""Fit the binning process. Fit the optimal binning to all variables
according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
.. versionchanged:: 0.4.0
X supports ``numpy.ndarray`` and ``pandas.DataFrame``.
y : array-like of shape (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``. This option is only
available for a binary target.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit(X, y, sample_weight, check_input)
| (self, X, y, sample_weight=None, check_input=False) |
4,534 | optbinning.binning.binning_process | fit_disk | Fit the binning process according to the given training data on
disk.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv or .parquet.
target : str
Target column.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv`` or
``pandas.read_parquet``.
Returns
-------
self : BinningProcess
Fitted binning process.
| def fit_disk(self, input_path, target, **kwargs):
"""Fit the binning process according to the given training data on
disk.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv or .parquet.
target : str
Target column.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv`` or
``pandas.read_parquet``.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit_disk(input_path, target, **kwargs)
| (self, input_path, target, **kwargs) |
4,535 | optbinning.binning.binning_process | fit_from_dict | Fit the binning process from a dict of OptimalBinning objects
already fitted.
Parameters
----------
dict_optb : dict
Dictionary with OptimalBinning objects for binary, continuous
or multiclass target. All objects must share the same class.
Returns
-------
self : BinningProcess
Fitted binning process.
| def fit_from_dict(self, dict_optb):
"""Fit the binning process from a dict of OptimalBinning objects
already fitted.
Parameters
----------
dict_optb : dict
Dictionary with OptimalBinning objects for binary, continuous
or multiclass target. All objects must share the same class.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self._fit_from_dict(dict_optb)
| (self, dict_optb) |
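A sketch of ``fit_from_dict`` with two pre-fitted ``OptimalBinning`` objects (all objects must share the same class). Data and variable names follow the earlier hypothetical setup.

from optbinning import OptimalBinning

optb_age = OptimalBinning(name="age", dtype="numerical").fit(X["age"], y)
optb_inc = OptimalBinning(name="income", dtype="numerical").fit(X["income"], y)

bproc = BinningProcess(variable_names=["age", "income"])
bproc.fit_from_dict({"age": optb_age, "income": optb_inc})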
4,536 | optbinning.binning.binning_process | fit_transform | Fit the binning process according to the given training data, then
transform it.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
y : array-like of shape (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``. This option is only
available for a binary target.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : numpy array, shape = (n_samples, n_features_new)
Transformed array.
| def fit_transform(self, X, y, sample_weight=None, metric=None,
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the binning process according to the given training data, then
transform it.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
y : array-like of shape (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``. This option is only
available for a binary target.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : numpy array, shape = (n_samples, n_features_new)
Transformed array.
"""
return self.fit(X, y, sample_weight, check_input).transform(
X, metric, metric_special, metric_missing, show_digits,
check_input)
| (self, X, y, sample_weight=None, metric=None, metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
4,537 | optbinning.binning.binning_process | fit_transform_disk | Fit the binning process according to the given training data on
disk, then transform it and save to comma-separated values (csv) file.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv.
output_path : str
Any valid string path to a file with extension .csv.
target : str
Target column.
chunksize : int
Rows to read, transform and write at a time.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv``.
Returns
-------
self : BinningProcess
Fitted binning process.
| def fit_transform_disk(self, input_path, output_path, target, chunksize,
metric=None, metric_special=0, metric_missing=0,
show_digits=2, **kwargs):
"""Fit the binning process according to the given training data on
disk, then transform it and save to comma-separated values (csv) file.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv.
output_path : str
Any valid string path to a file with extension .csv.
target : str
Target column.
chunksize : int
Rows to read, transform and write at a time.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv``.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
return self.fit_disk(input_path, target, **kwargs).transform_disk(
input_path, output_path, chunksize, metric, metric_special,
metric_missing, show_digits, **kwargs)
| (self, input_path, output_path, target, chunksize, metric=None, metric_special=0, metric_missing=0, show_digits=2, **kwargs) |
4,538 | optbinning.binning.binning_process | get_binned_variable | Return optimal binning object for a given variable name.
Parameters
----------
name : string
The variable name.
| def get_binned_variable(self, name):
"""Return optimal binning object for a given variable name.
Parameters
----------
name : string
The variable name.
"""
self._check_is_fitted()
if not isinstance(name, str):
raise TypeError("name must be a string.")
if name in self.variable_names:
return self._binned_variables[name]
else:
raise ValueError("name {} does not match a binned variable."
.format(name))
| (self, name) |
4,539 | sklearn.utils._metadata_requests | get_metadata_routing | Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRequest
A :class:`~sklearn.utils.metadata_routing.MetadataRequest` encapsulating
routing information.
| def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRequest
A :class:`~sklearn.utils.metadata_routing.MetadataRequest` encapsulating
routing information.
"""
return self._get_metadata_request()
| (self) |
4,540 | sklearn.base | get_params |
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
| def get_params(self, deep=True):
"""
Get parameters for this estimator.
Parameters
----------
deep : bool, default=True
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
value = getattr(self, key)
if deep and hasattr(value, "get_params") and not isinstance(value, type):
deep_items = value.get_params().items()
out.update((key + "__" + k, val) for k, val in deep_items)
out[key] = value
return out
| (self, deep=True) |
4,541 | optbinning.binning.binning_process | get_support | Get a mask, or integer index, or names of the variables selected.
Parameters
----------
indices : boolean (default=False)
If True, the return value will be an array of integers, rather
than a boolean mask.
names : boolean (default=False)
If True, the return value will be an array of strings, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector. If `names` is
True, this is a string array of shape [# output features] whose
values are names of the selected features.
| def get_support(self, indices=False, names=False):
"""Get a mask, or integer index, or names of the variables selected.
Parameters
----------
indices : boolean (default=False)
If True, the return value will be an array of integers, rather
than a boolean mask.
names : boolean (default=False)
If True, the return value will be an array of strings, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector. If `names` is
True, this is a string array of shape [# output features] whose
values are names of the selected features.
"""
self._check_is_fitted()
if indices and names:
raise ValueError("Only indices or names can be True.")
mask = self._support
if indices:
return np.where(mask)[0]
elif names:
return np.asarray(self.variable_names)[mask]
else:
return mask
| (self, indices=False, names=False) |
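The three return shapes side by side, assuming a fitted ``bproc``:

mask = bproc.get_support()               # boolean mask over input features
idx = bproc.get_support(indices=True)    # integer positions of selected ones
names = bproc.get_support(names=True)    # names of the selected variables
# get_support(indices=True, names=True) raises ValueError.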
4,542 | optbinning.binning.binning_process | information | Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
| def information(self, print_level=1):
"""Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_fitted()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
n_numerical = list(self._variable_dtypes.values()).count("numerical")
n_categorical = self._n_variables - n_numerical
self._n_selected = np.count_nonzero(self._support)
dict_user_options = self.get_params()
print_binning_process_information(
print_level, self._n_samples, self._n_variables,
self._target_dtype, n_numerical, n_categorical,
self._n_selected, self._time_total, dict_user_options)
| (self, print_level=1) |
4,543 | optbinning.binning.binning_process | save | Save binning process to pickle file.
Parameters
----------
path : str
Pickle file path.
| def save(self, path):
"""Save binning process to pickle file.
Parameters
----------
path : str
Pickle file path.
"""
if not isinstance(path, str):
raise TypeError("path must be a string.")
with open(path, "wb") as f:
pickle.dump(self, f)
| (self, path) |
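Since ``save`` is a plain ``pickle.dump``, loading back is a plain ``pickle.load``; the path is a placeholder.

import pickle

bproc.save("binning_process.pkl")
with open("binning_process.pkl", "rb") as f:
    bproc_restored = pickle.load(f)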
4,544 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``fit``.
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.binning_process.BinningProcess, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.binning_process.BinningProcess |
4,545 | sklearn.base | set_params | Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as :class:`~sklearn.pipeline.Pipeline`). The latter have
parameters of the form ``<component>__<parameter>`` so that it's
possible to update each component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
| def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as :class:`~sklearn.pipeline.Pipeline`). The latter have
parameters of the form ``<component>__<parameter>`` so that it's
possible to update each component of a nested object.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
"""
if not params:
# Simple optimization to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
nested_params = defaultdict(dict) # grouped by prefix
for key, value in params.items():
key, delim, sub_key = key.partition("__")
if key not in valid_params:
local_valid_params = self._get_param_names()
raise ValueError(
f"Invalid parameter {key!r} for estimator {self}. "
f"Valid parameters are: {local_valid_params!r}."
)
if delim:
nested_params[key][sub_key] = value
else:
setattr(self, key, value)
valid_params[key] = value
for key, sub_params in nested_params.items():
valid_params[key].set_params(**sub_params)
return self
| (self, **params) |
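A sketch of the nested ``<component>__<parameter>`` syntax inside a pipeline; the component names are illustrative.

from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline

pipe = Pipeline([("binning", BinningProcess(variable_names=["age", "income"])),
                 ("clf", LogisticRegression())])
# "binning__max_n_prebins" routes to the BinningProcess step,
# "clf__C" to the logistic regression step.
pipe.set_params(binning__max_n_prebins=10, clf__C=0.5)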
4,546 | sklearn.utils._metadata_requests | set_transform_request | Request metadata passed to the ``transform`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``transform`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``transform``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``transform``.
metric : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric`` parameter in ``transform``.
metric_missing : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_missing`` parameter in ``transform``.
metric_special : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_special`` parameter in ``transform``.
show_digits : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``show_digits`` parameter in ``transform``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.binning_process.BinningProcess, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', metric: Union[bool, NoneType, str] = '$UNCHANGED$', metric_missing: Union[bool, NoneType, str] = '$UNCHANGED$', metric_special: Union[bool, NoneType, str] = '$UNCHANGED$', show_digits: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.binning_process.BinningProcess |
4,547 | optbinning.binning.binning_process | summary | Binning process summary with main statistics for all binned
variables.
Returns
-------
df_summary : pandas.DataFrame
Binning process summary.
| def summary(self):
"""Binning process summary with main statistics for all binned
variables.
Returns
-------
df_summary : pandas.DataFrame
Binning process summary.
"""
self._check_is_fitted()
if self._is_updated:
self._binning_selection_criteria()
self._is_updated = False
df_summary = pd.DataFrame.from_dict(self._variable_stats).T
df_summary.reset_index(inplace=True)
df_summary.rename(columns={"index": "name"}, inplace=True)
df_summary["selected"] = self._support
columns = ["name", "dtype", "status", "selected", "n_bins"]
columns += _METRICS[self._target_dtype]["metrics"]
return df_summary[columns]
| (self) |
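A sketch of inspecting the summary for a binary target; the metric columns come from ``_METRICS`` per target dtype, so a column such as ``iv`` is an assumption that holds only for binary targets.

df_summary = bproc.summary()
# name, dtype, status, selected, n_bins, plus target-specific metrics.
print(df_summary.sort_values("iv", ascending=False).head())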
4,548 | optbinning.binning.binning_process | transform | Transform given data to metric using bins from each fitted optimal
binning.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : numpy array or pandas.DataFrame, shape = (n_samples,
n_features_new)
Transformed array.
| def transform(self, X, metric=None, metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to metric using bins from each fitted optimal
binning.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : numpy array or pandas.DataFrame, shape = (n_samples,
n_features_new)
Transformed array.
"""
self._check_is_fitted()
return self._transform(X, metric, metric_special, metric_missing,
show_digits, check_input)
| (self, X, metric=None, metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
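The ``metric="bins"`` path returns bin labels as strings, with ``show_digits`` controlling the significant digits of the interval endpoints; ``bproc`` and ``X`` are assumed from the earlier sketch.

X_bins = bproc.transform(X, metric="bins", show_digits=3)
X_woe = bproc.transform(X)  # default metric: WoE for a binary target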
4,549 | optbinning.binning.binning_process | transform_disk | Transform given data on disk to metric using bins from each fitted
optimal binning. Save to comma-separated values (csv) file.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv.
output_path : str
Any valid string path to a file with extension .csv.
chunksize : int
Rows to read, transform and write at a time.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv``.
Returns
-------
self : BinningProcess
Fitted binning process.
| def transform_disk(self, input_path, output_path, chunksize, metric=None,
metric_special=0, metric_missing=0, show_digits=2,
**kwargs):
"""Transform given data on disk to metric using bins from each fitted
optimal binning. Save to comma-separated values (csv) file.
Parameters
----------
input_path : str
Any valid string path to a file with extension .csv.
output_path : str
Any valid string path to a file with extension .csv.
chunksize : int
Rows to read, transform and write at a time.
metric : str or None, (default=None)
The metric used to transform the input vector. If None, the default
transformation metric for each target type is applied. For binary
target options are: "woe" (default), "event_rate", "indices" and
"bins". For continuous target options are: "mean" (default),
"indices" and "bins". For multiclass target options are:
"mean_woe" (default), "weighted_mean_woe", "indices" and "bins".
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate for a binary target, and any numerical value for other
targets.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
**kwargs : keyword arguments
Keyword arguments for ``pandas.read_csv``.
Returns
-------
self : BinningProcess
Fitted binning process.
"""
self._check_is_fitted()
return self._transform_disk(input_path, output_path, chunksize, metric,
metric_special, metric_missing,
show_digits, **kwargs)
| (self, input_path, output_path, chunksize, metric=None, metric_special=0, metric_missing=0, show_digits=2, **kwargs) |
4,550 | optbinning.binning.binning_process | update_binned_variable | Update optimal binning object for a given variable.
Parameters
----------
name : string
The variable name.
optb : object
The optimal binning object already fitted.
| def update_binned_variable(self, name, optb):
"""Update optimal binning object for a given variable.
Parameters
----------
name : string
The variable name.
optb : object
The optimal binning object already fitted.
"""
self._check_is_fitted()
if not isinstance(name, str):
raise TypeError("name must be a string.")
if name not in self.variable_names:
raise ValueError("name {} does not match a binned variable."
.format(name))
optb_types = _OPTB_TYPES + _OPTBPW_TYPES
if not isinstance(optb, optb_types):
raise TypeError("Object {} must be of type ({}); got {}"
.format(name, optb_types, type(optb)))
# Check current class
if self._target_dtype == "binary":
optb_binary = (OptimalBinning, OptimalPWBinning)
if not isinstance(optb, optb_binary):
raise TypeError("target is binary and Object {} must be of "
"type {}.".format(optb, optb_binary))
elif self._target_dtype == "continuous":
optb_continuous = (ContinuousOptimalBinning,
ContinuousOptimalPWBinning)
if not isinstance(optb, optb_continuous):
raise TypeError("target is continuous and Object {} must be "
"of type {}.".format(optb, optb_continuous))
elif self._target_dtype == "multiclass":
if not isinstance(optb, MulticlassOptimalBinning):
raise TypeError("target is multiclass and Object {} must be "
"of type {}.".format(
optb, MulticlassOptimalBinning))
optb_old = self._binned_variables[name]
if optb_old.name and optb_old.name != optb.name:
raise ValueError("Update object name must match old object name; "
"{} != {}.".format(optb_old.name, optb.name))
if optb.name and name != optb.name:
raise ValueError("name and object name must coincide.")
self._binned_variables[name] = optb
self._is_updated = True
| (self, name, optb) |
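A sketch of swapping in a refitted binning for a single variable; the tightened ``max_n_bins`` constraint is illustrative.

from optbinning import OptimalBinning

optb_new = OptimalBinning(name="age", dtype="numerical", max_n_bins=4)
optb_new.fit(X["age"], y)
bproc.update_binned_variable("age", optb_new)
# summary() is recomputed lazily via the _is_updated flag.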
4,551 | optbinning.binning.distributed.binning_process_sketch | BinningProcessSketch | Binning process over data streams to compute optimal binning of
variables with respect to a binary target.
Parameters
----------
variable_names : array-like
List of variable names.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
selection_criteria : dict or None (default=None)
Variable selection criteria. See notes.
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
categorical_variables : array-like or None, optional (default=None)
List of numerical variables to be considered categorical.
These are nominal variables. Not applicable when target type is
multiclass.
binning_fit_params : dict or None, optional (default=None)
Dictionary with optimal binning fitting options for specific variables.
Example: ``{"variable_1": {"max_n_bins": 4}}``.
binning_transform_params : dict or None, optional (default=None)
Dictionary with optimal binning transform options for specific
variables. Example ``{"variable_1": {"metric": "event_rate"}}``.
verbose : bool (default=False)
Enable verbose output.
Notes
-----
Parameter ``selection_criteria`` allows specifying criteria for
variable selection. The input is a dictionary as follows
.. code::
selection_criteria = {
"metric_1":
{
"min": 0, "max": 1, "strategy": "highest", "top": 0.25
},
"metric_2":
{
"min": 0.02
}
}
where several metrics can be combined. For example, the above dictionary
indicates that the top 25% of variables with "metric_1" in [0, 1] and
"metric_2" greater than or equal to 0.02 are selected. Supported key values are:
* keys ``min`` and ``max`` support numerical values.
* key ``strategy`` supports options "highest" and "lowest".
* key ``top`` supports an integer or decimal (percentage).
.. warning::
If the binning process instance is going to be saved, do not pass the
option ``"solver": "mip"`` via the binning_fit_params parameter.
| class BinningProcessSketch(BaseSketch, BaseEstimator, BaseBinningProcess):
"""Binning process over data streams to compute optimal binning of
variables with respect to a binary target.
Parameters
----------
variable_names : array-like
List of variable names.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
selection_criteria : dict or None (default=None)
Variable selection criteria. See notes.
special_codes : array-like or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
categorical_variables : array-like or None, optional (default=None)
List of numerical variables to be considered categorical.
These are nominal variables. Not applicable when target type is
multiclass.
binning_fit_params : dict or None, optional (default=None)
Dictionary with optimal binning fitting options for specific variables.
Example: ``{"variable_1": {"max_n_bins": 4}}``.
binning_transform_params : dict or None, optional (default=None)
Dictionary with optimal binning transform options for specific
variables. Example ``{"variable_1": {"metric": "event_rate"}}``.
verbose : bool (default=False)
Enable verbose output.
Notes
-----
Parameter ``selection_criteria`` allows specifying criteria for
variable selection. The input is a dictionary as follows
.. code::
selection_criteria = {
"metric_1":
{
"min": 0, "max": 1, "strategy": "highest", "top": 0.25
},
"metric_2":
{
"min": 0.02
}
}
where several metrics can be combined. For example, the above dictionary
indicates that the top 25% of variables with "metric_1" in [0, 1] and
"metric_2" greater than or equal to 0.02 are selected. Supported key values are:
* keys ``min`` and ``max`` support numerical values.
* key ``strategy`` supports options "highest" and "lowest".
* key ``top`` supports an integer or decimal (percentage).
.. warning::
If the binning process instance is going to be saved, do not pass the
option ``"solver": "mip"`` via the binning_fit_params parameter.
"""
def __init__(self, variable_names, max_n_prebins=20, min_n_bins=None,
max_n_bins=None, min_bin_size=None, max_bin_size=None,
max_pvalue=None, max_pvalue_policy="consecutive",
selection_criteria=None, categorical_variables=None,
special_codes=None, split_digits=None,
binning_fit_params=None, binning_transform_params=None,
verbose=False):
self.variable_names = variable_names
self.max_n_prebins = max_n_prebins
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.selection_criteria = selection_criteria
self.binning_fit_params = binning_fit_params
self.binning_transform_params = binning_transform_params
self.special_codes = special_codes
self.split_digits = split_digits
self.categorical_variables = categorical_variables
self.verbose = verbose
# target information to reuse BaseBinningProcess
self._target_dtype = "binary"
# auxiliary
self._n_samples = None
self._n_variables = None
self._n_numerical = None
self._n_categorical = None
self._n_selected = None
self._binned_variables = {}
self._variable_dtypes = {}
self._variable_stats = {}
self._support = None
# streaming stats
self._n_add = 0
self._n_solve = 0
# timing
self._time_streaming_add = 0
self._time_streaming_solve = 0
# flags
self._is_started = False
self._is_solved = False
# Check parameters
_check_parameters(**self.get_params())
def add(self, X, y, check_input=False):
"""Add new data X, y to the binning sketch of each variable.
Parameters
----------
X : pandas.DataFrame, shape (n_samples, n_features)
y : array-like of shape (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BinningProcessSketch
Binning process with new data.
"""
if not self._is_started:
self._n_samples = 0
self._n_variables = len(self.variable_names)
if self.categorical_variables is not None:
self._n_categorical = len(self.categorical_variables)
else:
self._n_categorical = 0
self._n_numerical = self._n_variables - self._n_categorical
# Check selection criteria
if self.selection_criteria is not None:
_check_selection_criteria(self.selection_criteria,
self._target_dtype)
# Initialize bsketch for each variable. To avoid mixed dtypes
# the user must provide a dtype for all variables. This differs
# from the BinningProcess, where dtypes are inferred.
for name in self.variable_names:
if (self.categorical_variables is not None and
name in self.categorical_variables):
dtype = "categorical"
else:
dtype = "numerical"
optb = OptimalBinningSketch(
name=name, dtype=dtype,
max_n_prebins=self.max_n_prebins,
min_n_bins=self.min_n_bins,
max_n_bins=self.max_n_bins,
min_bin_size=self.min_bin_size,
max_pvalue=self.max_pvalue,
max_pvalue_policy=self.max_pvalue_policy,
special_codes=self.special_codes,
split_digits=self.split_digits)
if self.binning_fit_params is not None:
params = self.binning_fit_params.get(name, {})
else:
params = {}
optb.set_params(**params)
self._variable_dtypes[name] = dtype
self._binned_variables[name] = optb
self._is_started = True
# Add new data stream
time_add = time.perf_counter()
# Add data to variables that appear in X. During training the
# data columns might change, for example, not all data sources
# contain the same variables.
for name in X.columns:
if name in self.variable_names:
if self.verbose:
logger.info("Add variable: {}.".format(name))
self._binned_variables[name].add(X[name], y, check_input)
# Update count samples and addition operations
self._n_samples += X.shape[0]
self._n_add += 1
self._time_streaming_add += time.perf_counter() - time_add
if self.verbose:
logger.info("Sketch: added new data.")
return self
def information(self, print_level=1):
"""Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_solved()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
self._n_selected = np.count_nonzero(self._support)
dict_user_options = self.get_params()
print_binning_process_sketch_information(
print_level, self._n_samples, self._n_variables,
self._target_dtype, self._n_numerical, self._n_categorical,
self._n_selected, self._n_add, self._time_streaming_add,
self._n_solve, self._time_streaming_solve, dict_user_options)
def summary(self):
"""Binning process summary with main statistics for all binned
variables.
Returns
-------
df_summary : pandas.DataFrame
Binning process summary.
"""
self._check_is_solved()
df_summary = pd.DataFrame.from_dict(self._variable_stats).T
df_summary.reset_index(inplace=True)
df_summary.rename(columns={"index": "name"}, inplace=True)
df_summary["selected"] = self._support
columns = ["name", "dtype", "status", "selected", "n_bins"]
columns += _METRICS[self._target_dtype]["metrics"]
return df_summary[columns]
def merge(self, bpsketch):
"""Merge current instance with another BinningProcessSketch instance.
Parameters
----------
bpsketch : object
BinningProcessSketch instance.
"""
if not self.mergeable(bpsketch):
raise Exception("bpsketch does not share the same signature.")
for name in self.variable_names:
self._binned_variables[name].merge(
bpsketch._binned_variables[name])
if self.verbose:
logger.info("Sketch: current sketch was merged.")
def mergeable(self, bpsketch):
"""Check whether two BinningProcessSketch instances can be merged.
Parameters
----------
bpsketch : object
BinningProcessSketch instance.
Returns
-------
mergeable : bool
"""
return self.get_params() == bpsketch.get_params()
def solve(self):
"""Solve optimal binning for all variables using added data.
Returns
-------
self : BinningProcessSketch
Current fitted binning process.
"""
time_init = time.perf_counter()
# Check if data was added
if not self._n_add:
raise NotDataAddedError(
"No data was added. Add data before solving.")
for i, name in enumerate(self.variable_names):
if self.verbose:
logger.info("Binning variable ({} / {}): {}."
.format(i, self._n_variables, name))
self._binned_variables[name].solve()
if self.verbose:
logger.info("Binning process variable selection...")
# Compute binning statistics and decide whether a variable is selected
self._binning_selection_criteria()
self._time_streaming_solve += time.perf_counter() - time_init
self._n_solve += 1
# Completed successfully
self._is_solved = True
return self
def transform(self, X, metric="woe", metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to metric using bins from each fitted optimal
binning.
Parameters
----------
X : pandas.DataFrame of shape (n_samples, n_features)
Data matrix, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : pandas.DataFrame, shape = (n_samples, n_features_new)
Transformed array.
"""
self._check_is_solved()
# Check X dtype
if not isinstance(X, pd.DataFrame):
raise TypeError("X must be a pandas.DataFrame.")
n_samples, n_variables = X.shape
# Check metric
if metric not in ("event_rate", "woe", "indices", "bins"):
raise ValueError('Invalid value for metric. Allowed string '
'values are "event_rate", "woe", "indices" and "bins".')
mask = self.get_support()
if not mask.any():
warn("No variables were selected: either the data is"
" too noisy or the selection_criteria too strict.",
UserWarning)
return np.empty(0).reshape((n_samples, 0))
selected_variables = self.get_support(names=True)
for name in selected_variables:
if name not in X.columns:
raise ValueError("Selected variable {} must be a column "
"in the input dataframe.".format(name))
n_selected_variables = len(selected_variables)
if metric == "indices":
X_transform = np.full(
(n_samples, n_selected_variables), -1, dtype=int)
elif metric == "bins":
X_transform = np.full(
(n_samples, n_selected_variables), "", dtype=object)
else:
X_transform = np.zeros((n_samples, n_selected_variables))
for i, name in enumerate(selected_variables):
optb = self._binned_variables[name]
x = X[name]
params = {}
if self.binning_transform_params is not None:
params = self.binning_transform_params.get(name, {})
metric_missing = params.get("metric_missing", metric_missing)
metric_special = params.get("metric_special", metric_special)
tparams = {
"x": x,
"metric": metric,
"metric_special": metric_special,
"metric_missing": metric_missing,
"check_input": check_input,
"show_digits": show_digits
}
if metric is not None:
tparams["metric"] = params.get("metric", metric)
else:
tparams.pop("metric")
X_transform[:, i] = optb.transform(**tparams)
return pd.DataFrame(X_transform, columns=selected_variables)
def get_binned_variable(self, name):
"""Return optimal binning sketch object for a given variable name.
Parameters
----------
name : string
The variable name.
"""
self._check_is_solved()
if not isinstance(name, str):
raise TypeError("name must be a string.")
if name in self.variable_names:
return self._binned_variables[name]
else:
raise ValueError("name {} does not match a binned variable."
.format(name))
def get_support(self, indices=False, names=False):
"""Get a mask, or integer index, or names of the variables selected.
Parameters
----------
indices : boolean (default=False)
If True, the return value will be an array of integers, rather
than a boolean mask.
names : boolean (default=False)
If True, the return value will be an array of strings, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector. If `names` is
True, this is a string array of shape [# output features], whose
values are names of the selected features.
"""
self._check_is_solved()
if indices and names:
raise ValueError("Only indices or names can be True.")
mask = self._support
if indices:
return np.where(mask)[0]
elif names:
return np.asarray(self.variable_names)[mask]
else:
return mask
| (variable_names, max_n_prebins=20, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, max_pvalue=None, max_pvalue_policy='consecutive', selection_criteria=None, categorical_variables=None, special_codes=None, split_digits=None, binning_fit_params=None, binning_transform_params=None, verbose=False) |
4,553 | optbinning.binning.distributed.binning_process_sketch | __init__ | null | def __init__(self, variable_names, max_n_prebins=20, min_n_bins=None,
max_n_bins=None, min_bin_size=None, max_bin_size=None,
max_pvalue=None, max_pvalue_policy="consecutive",
selection_criteria=None, categorical_variables=None,
special_codes=None, split_digits=None,
binning_fit_params=None, binning_transform_params=None,
verbose=False):
self.variable_names = variable_names
self.max_n_prebins = max_n_prebins
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.selection_criteria = selection_criteria
self.binning_fit_params = binning_fit_params
self.binning_transform_params = binning_transform_params
self.special_codes = special_codes
self.split_digits = split_digits
self.categorical_variables = categorical_variables
self.verbose = verbose
# target information to reuse BaseBinningProcess
self._target_dtype = "binary"
# auxiliary
self._n_samples = None
self._n_variables = None
self._n_numerical = None
self._n_categorical = None
self._n_selected = None
self._binned_variables = {}
self._variable_dtypes = {}
self._variable_stats = {}
self._support = None
# streaming stats
self._n_add = 0
self._n_solve = 0
# timing
self._time_streaming_add = 0
self._time_streaming_solve = 0
# flags
self._is_started = False
self._is_solved = False
# Check parameters
_check_parameters(**self.get_params())
| (self, variable_names, max_n_prebins=20, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, max_pvalue=None, max_pvalue_policy='consecutive', selection_criteria=None, categorical_variables=None, special_codes=None, split_digits=None, binning_fit_params=None, binning_transform_params=None, verbose=False) |
4,559 | optbinning.binning.distributed.base | _check_is_solved | null | def _check_is_solved(self):
if not self._is_solved:
raise NotSolvedError("This {} instance is not solved yet. Call "
"'solve' with appropriate arguments."
.format(self.__class__.__name__))
| (self) |
4,570 | optbinning.binning.distributed.binning_process_sketch | add | Add new data X, y to the binning sketch of each variable.
Parameters
----------
X : pandas.DataFrame, shape (n_samples, n_features)
y : array-like of shape (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BinningProcessSketch
Binning process with new data.
| def add(self, X, y, check_input=False):
"""Add new data X, y to the binning sketch of each variable.
Parameters
----------
X : pandas.DataFrame, shape (n_samples, n_features)
y : array-like of shape (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BinningProcessSketch
Binning process with new data.
"""
if not self._is_started:
self._n_samples = 0
self._n_variables = len(self.variable_names)
if self.categorical_variables is not None:
self._n_categorical = len(self.categorical_variables)
else:
self._n_categorical = 0
self._n_numerical = self._n_variables - self._n_categorical
# Check selection criteria
if self.selection_criteria is not None:
_check_selection_criteria(self.selection_criteria,
self._target_dtype)
# Initialize bsketch for each variable. To avoid mixed dtypes
# the user must provide a dtype for all variables. This differs
# from the BinningProcess, where dtypes are inferred.
for name in self.variable_names:
if (self.categorical_variables is not None and
name in self.categorical_variables):
dtype = "categorical"
else:
dtype = "numerical"
optb = OptimalBinningSketch(
name=name, dtype=dtype,
max_n_prebins=self.max_n_prebins,
min_n_bins=self.min_n_bins,
max_n_bins=self.max_n_bins,
min_bin_size=self.min_bin_size,
max_pvalue=self.max_pvalue,
max_pvalue_policy=self.max_pvalue_policy,
special_codes=self.special_codes,
split_digits=self.split_digits)
if self.binning_fit_params is not None:
params = self.binning_fit_params.get(name, {})
else:
params = {}
optb.set_params(**params)
self._variable_dtypes[name] = dtype
self._binned_variables[name] = optb
self._is_started = True
# Add new data stream
time_add = time.perf_counter()
# Add data to variables that appear in X. During training the
# data columns might change, for example, not all data sources
# contain the same variables.
for name in X.columns:
if name in self.variable_names:
if self.verbose:
logger.info("Add variable: {}.".format(name))
self._binned_variables[name].add(X[name], y, check_input)
# Update count samples and addition operations
self._n_samples += X.shape[0]
self._n_add += 1
self._time_streaming_add += time.perf_counter() - time_add
if self.verbose:
logger.info("Sketch: added new data.")
return self
| (self, X, y, check_input=False) |
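The streaming pattern above pairs naturally with chunked readers. A hedged
sketch (file name, chunk size, and column names are illustrative):

# Build the sketch incrementally from CSV chunks, then solve once.
import pandas as pd

sketch = BinningProcessSketch(variable_names=["var_1", "var_2"])
for chunk in pd.read_csv("data.csv", chunksize=100_000):
    sketch.add(chunk[["var_1", "var_2"]], chunk["target"])
sketch.solve()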
4,571 | optbinning.binning.distributed.binning_process_sketch | get_binned_variable | Return optimal binning sketch object for a given variable name.
Parameters
----------
name : string
The variable name.
| def get_binned_variable(self, name):
"""Return optimal binning sketch object for a given variable name.
Parameters
----------
name : string
The variable name.
"""
self._check_is_solved()
if not isinstance(name, str):
raise TypeError("name must be a string.")
if name in self.variable_names:
return self._binned_variables[name]
else:
raise ValueError("name {} does not match a binned variable."
.format(name))
| (self, name) |
4,574 | optbinning.binning.distributed.binning_process_sketch | get_support | Get a mask, or integer index, or names of the variables selected.
Parameters
----------
indices : boolean (default=False)
If True, the return value will be an array of integers, rather
than a boolean mask.
names : boolean (default=False)
If True, the return value will be an array of strings, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector. If `names` is
True, this is an string array of sahpe [# output features], whose
values are names of the selected features.
| def get_support(self, indices=False, names=False):
"""Get a mask, or integer index, or names of the variables selected.
Parameters
----------
indices : boolean (default=False)
If True, the return value will be an array of integers, rather
than a boolean mask.
names : boolean (default=False)
If True, the return value will be an array of strings, rather
than a boolean mask.
Returns
-------
support : array
An index that selects the retained features from a feature vector.
If `indices` is False, this is a boolean array of shape
[# input features], in which an element is True iff its
corresponding feature is selected for retention. If `indices` is
True, this is an integer array of shape [# output features] whose
values are indices into the input feature vector. If `names` is
True, this is a string array of shape [# output features], whose
values are names of the selected features.
"""
self._check_is_solved()
if indices and names:
raise ValueError("Only indices or names can be True.")
mask = self._support
if indices:
return np.where(mask)[0]
elif names:
return np.asarray(self.variable_names)[mask]
else:
return mask
| (self, indices=False, names=False) |
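For illustration, the three return modes of ``get_support``, assuming a
solved sketch over ["var_1", "var_2", "var_3"] where the first two
variables were selected:

mask = sketch.get_support()             # array([ True,  True, False])
idx = sketch.get_support(indices=True)  # array([0, 1])
names = sketch.get_support(names=True)  # array(['var_1', 'var_2'], ...)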
4,575 | optbinning.binning.distributed.binning_process_sketch | information | Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
| def information(self, print_level=1):
"""Print overview information about the options settings and
statistics.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_solved()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
self._n_selected = np.count_nonzero(self._support)
dict_user_options = self.get_params()
print_binning_process_sketch_information(
print_level, self._n_samples, self._n_variables,
self._target_dtype, self._n_numerical, self._n_categorical,
self._n_selected, self._n_add, self._time_streaming_add,
self._n_solve, self._time_streaming_solve, dict_user_options)
| (self, print_level=1) |
4,576 | optbinning.binning.distributed.binning_process_sketch | merge | Merge current instance with another BinningProcessSketch instance.
Parameters
----------
bpsketch : object
BinningProcessSketch instance.
| def merge(self, bpsketch):
"""Merge current instance with another BinningProcessSketch instance.
Parameters
----------
bpsketch : object
BinningProcessSketch instance.
"""
if not self.mergeable(bpsketch):
raise Exception("bpsketch does not share the same signature.")
for name in self.variable_names:
self._binned_variables[name].merge(
bpsketch._binned_variables[name])
if self.verbose:
logger.info("Sketch: current sketch was merged.")
| (self, bpsketch) |
4,577 | optbinning.binning.distributed.binning_process_sketch | mergeable | Check whether two BinningProcessSketch instances can be merged.
Parameters
----------
bpsketch : object
BinningProcessSketch instance.
Returns
-------
mergeable : bool
| def mergeable(self, bpsketch):
"""Check whether two BinningProcessSketch instances can be merged.
Parameters
----------
bpsketch : object
BinningProcessSketch instance.
Returns
-------
mergeable : bool
"""
return self.get_params() == bpsketch.get_params()
| (self, bpsketch) |
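Together, ``mergeable`` and ``merge`` support a map-reduce style workflow:
fit one sketch per data partition, then reduce into a single sketch before
solving. A hedged sketch (``partitions`` is a hypothetical iterable of
(X, y) pairs; all sketches must share the same parameters):

sketches = []
for X_part, y_part in partitions:
    s = BinningProcessSketch(variable_names=["var_1", "var_2"])
    s.add(X_part, y_part)
    sketches.append(s)

main = sketches[0]
for other in sketches[1:]:
    if main.mergeable(other):  # same get_params() signature
        main.merge(other)
main.solve()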
4,581 | optbinning.binning.distributed.binning_process_sketch | solve | Solve optimal binning for all variables using added data.
Returns
-------
self : BinningProcessSketch
Current fitted binning process.
| def solve(self):
"""Solve optimal binning for all variables using added data.
Returns
-------
self : BinningProcessSketch
Current fitted binning process.
"""
time_init = time.perf_counter()
# Check if data was added
if not self._n_add:
raise NotDataAddedError(
"No data was added. Add data before solving.")
for i, name in enumerate(self.variable_names):
if self.verbose:
logger.info("Binning variable ({} / {}): {}."
.format(i, self._n_variables, name))
self._binned_variables[name].solve()
if self.verbose:
logger.info("Binning process variable selection...")
# Compute binning statistics and decide whether a variable is selected
self._binning_selection_criteria()
self._time_streaming_solve += time.perf_counter() - time_init
self._n_solve += 1
# Completed successfully
self._is_solved = True
return self
| (self) |
4,582 | optbinning.binning.distributed.binning_process_sketch | summary | Binning process summary with main statistics for all binned
variables.
Returns
-------
df_summary : pandas.DataFrame
Binning process summary.
| def summary(self):
"""Binning process summary with main statistics for all binned
variables.
Returns
-------
df_summary : pandas.DataFrame
Binning process summary.
"""
self._check_is_solved()
df_summary = pd.DataFrame.from_dict(self._variable_stats).T
df_summary.reset_index(inplace=True)
df_summary.rename(columns={"index": "name"}, inplace=True)
df_summary["selected"] = self._support
columns = ["name", "dtype", "status", "selected", "n_bins"]
columns += _METRICS[self._target_dtype]["metrics"]
return df_summary[columns]
| (self) |
4,583 | optbinning.binning.distributed.binning_process_sketch | transform | Transform given data to metric using bins from each fitted optimal
binning.
Parameters
----------
X : pandas.DataFrame of shape (n_samples, n_features)
Data matrix, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : pandas.DataFrame, shape = (n_samples, n_features_new)
Transformed array.
| def transform(self, X, metric="woe", metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to metric using bins from each fitted optimal
binning.
Parameters
----------
X : pandas.DataFrame of shape (n_samples, n_features)
Data matrix, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
X_new : pandas.DataFrame, shape = (n_samples, n_features_new)
Transformed array.
"""
self._check_is_solved()
# Check X dtype
if not isinstance(X, pd.DataFrame):
raise TypeError("X must be a pandas.DataFrame.")
n_samples, n_variables = X.shape
# Check metric
if metric not in ("event_rate", "woe", "indices", "bins"):
raise ValueError('Invalid value for metric. Allowed string '
'values are "event_rate", "woe", "indices" and "bins".')
mask = self.get_support()
if not mask.any():
warn("No variables were selected: either the data is"
" too noisy or the selection_criteria too strict.",
UserWarning)
return np.empty(0).reshape((n_samples, 0))
selected_variables = self.get_support(names=True)
for name in selected_variables:
if name not in X.columns:
raise ValueError("Selected variable {} must be a column "
"in the input dataframe.".format(name))
n_selected_variables = len(selected_variables)
if metric == "indices":
X_transform = np.full(
(n_samples, n_selected_variables), -1, dtype=int)
elif metric == "bins":
X_transform = np.full(
(n_samples, n_selected_variables), "", dtype=object)
else:
X_transform = np.zeros((n_samples, n_selected_variables))
for i, name in enumerate(selected_variables):
optb = self._binned_variables[name]
x = X[name]
params = {}
if self.binning_transform_params is not None:
params = self.binning_transform_params.get(name, {})
metric_missing = params.get("metric_missing", metric_missing)
metric_special = params.get("metric_special", metric_special)
tparams = {
"x": x,
"metric": metric,
"metric_special": metric_special,
"metric_missing": metric_missing,
"check_input": check_input,
"show_digits": show_digits
}
if metric is not None:
tparams["metric"] = params.get("metric", metric)
else:
tparams.pop("metric")
X_transform[:, i] = optb.transform(**tparams)
return pd.DataFrame(X_transform, columns=selected_variables)
| (self, X, metric='woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
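Per-variable options set via ``binning_transform_params`` at construction
time override the global ``metric``, ``metric_special`` and
``metric_missing`` arguments, as the loop above shows. A hedged example
(variable names are illustrative):

# "var_1" is transformed to event rates; other selected variables use WoE.
sketch = BinningProcessSketch(
    variable_names=["var_1", "var_2"],
    binning_transform_params={"var_1": {"metric": "event_rate"}})
sketch.add(X, y)
sketch.solve()
X_new = sketch.transform(X, metric="woe")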
4,584 | optbinning.binning.continuous_binning | ContinuousOptimalBinning | Optimal binning of a numerical or categorical variable with respect to a
continuous target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecisionTreeRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeRegressor.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of the minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The **mean** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend that minimizes the L1-norm using a machine learning classifier,
"ascending", "descending", "concave", "convex", "peak" and
"peak_heuristic" to allow a peak change point, and "valley" and
"valley_heuristic" to allow a valley change point. Trends
"auto_heuristic", "peak_heuristic" and "valley_heuristic" use a
heuristic to determine the change point, and are significantly faster
for large instances (``max_n_prebins > 20``). Trend "auto_asc_desc"
is used to automatically select the best monotonic trend between
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_mean_diff : float, optional (default=0)
The minimum mean difference between consecutive bins.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The T-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization.
.. versionadded:: 0.14.0
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method, "zcore" to use the modified
Z-score method or "yquantile" to use the y-axis detector over
quantiles.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
cat_cutoff : float or None, optional (default=None)
Generate a bin "others" grouping the categories whose fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
cat_unknown : float, str or None (default=None)
The value assigned to categories not observed during training but
occurring during transform.
If None, the assigned value to an unknown category follows this rule:
- if transform metric == 'mean' then mean target
- if transform metric == 'indices' then -1
- if transform metric == 'bins' then 'unknown'
.. versionadded:: 0.17.1
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keyword arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity and memory usage. The default values generally produce quality
results, however, some improvement can be achieved by increasing
``max_n_prebins`` and/or decreasing ``min_prebin_size``.
The pre-binning refinement phase guarantees that no prebin has zero
records by merging pure prebins. Pure bins produce an infinite mean.
| class ContinuousOptimalBinning(OptimalBinning):
"""Optimal binning of a numerical or categorical variable with respect to a
continuous target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecisionTreeRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeRegressor.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of the minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The **mean** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend that minimizes the L1-norm using a machine learning classifier,
"ascending", "descending", "concave", "convex", "peak" and
"peak_heuristic" to allow a peak change point, and "valley" and
"valley_heuristic" to allow a valley change point. Trends
"auto_heuristic", "peak_heuristic" and "valley_heuristic" use a
heuristic to determine the change point, and are significantly faster
for large instances (``max_n_prebins > 20``). Trend "auto_asc_desc"
is used to automatically select the best monotonic trend between
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_mean_diff : float, optional (default=0)
The minimum mean difference between consecutive bins.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The T-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization.
.. versionadded:: 0.14.0
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method, "zcore" to use the modified
Z-score method or "yquantile" to use the y-axis detector over
quantiles.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
cat_cutoff : float or None, optional (default=None)
Generate a bin "others" grouping the categories whose fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
cat_unknown : float, str or None (default=None)
The value assigned to categories not observed during training but
occurring during transform.
If None, the assigned value to an unknown category follows this rule:
- if transform metric == 'mean' then mean target
- if transform metric == 'indices' then -1
- if transform metric == 'bins' then 'unknown'
.. versionadded:: 0.17.1
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keyword arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity and memory usage. The default values generally produce quality
results, however, some improvement can be achieved by increasing
``max_n_prebins`` and/or decreasing ``min_prebin_size``.
The pre-binning refinement phase guarantees that no prebin has zero
records by merging pure prebins. Pure bins produce an infinite mean.
"""
def __init__(self, name="", dtype="numerical", prebinning_method="cart",
max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None,
max_n_bins=None, min_bin_size=None, max_bin_size=None,
monotonic_trend="auto", min_mean_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", gamma=0,
outlier_detector=None, outlier_params=None, cat_cutoff=None,
cat_unknown=None, user_splits=None, user_splits_fixed=None,
special_codes=None, split_digits=None, time_limit=100,
verbose=False, **prebinning_kwargs):
self.name = name
self.dtype = dtype
self.prebinning_method = prebinning_method
self.solver = "cp"
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend = monotonic_trend
self.min_mean_diff = min_mean_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.gamma = gamma
self.outlier_detector = outlier_detector
self.outlier_params = outlier_params
self.cat_cutoff = cat_cutoff
self.cat_unknown = cat_unknown
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.time_limit = time_limit
self.verbose = verbose
self.prebinning_kwargs = prebinning_kwargs
# auxiliary
self._categories = None
self._cat_others = None
self._n_records = None
self._sums = None
self._stds = None
self._min_target = None
self._max_target = None
self._n_zeros = None
self._n_records_cat_others = None
self._n_records_missing = None
self._n_records_special = None
self._sum_cat_others = None
self._sum_special = None
self._sum_missing = None
self._std_cat_others = None
self._std_special = None
self._std_missing = None
self._min_target_missing = None
self._min_target_special = None
self._min_target_others = None
self._max_target_missing = None
self._max_target_special = None
self._max_target_others = None
self._n_zeros_missing = None
self._n_zeros_special = None
self._n_zeros_others = None
self._problem_type = "regression"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
def fit(self, x, y, sample_weight=None, check_input=False):
"""Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : ContinuousOptimalBinning
Fitted optimal binning.
"""
return self._fit(x, y, sample_weight, check_input)
def fit_transform(self, x, y, sample_weight=None, metric="mean",
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, sample_weight, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
def transform(self, x, metric="mean", metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to mean using bins from the fitted
optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transforming data that contains categories not present during training
returns zero mean.
"""
self._check_is_fitted()
return transform_continuous_target(self._splits_optimal, self.dtype,
x, self._n_records, self._sums,
self.special_codes,
self._categories, self._cat_others,
self.cat_unknown, metric,
metric_special, metric_missing,
self.user_splits, show_digits,
check_input)
def _fit(self, x, y, sample_weight, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
y_others, categories, cat_others, sw_clean, sw_missing, sw_special,
sw_others] = split_data(
self.dtype, x, y, self.special_codes, self.cat_cutoff,
self.user_splits, check_input, self.outlier_detector,
self.outlier_params, None, None, None, sample_weight)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples - (n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
if self.dtype == "categorical":
n_categories = len(categories)
n_categories_others = len(cat_others)
n_others = len(y_others)
logger.info("Pre-processing: number of others samples: {}"
.format(n_others))
logger.info("Pre-processing: number of categories: {}"
.format(n_categories))
logger.info("Pre-processing: number of categories others: {}"
.format(n_categories_others))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
n_splits = len(self.user_splits)
if self.verbose:
logger.info("Pre-binning: user splits supplied: {}"
.format(n_splits))
if not n_splits:
splits = self.user_splits
n_records = np.array([])
sums = np.array([])
stds = np.array([])
else:
if self.dtype == "numerical":
user_splits = check_array(
self.user_splits, ensure_2d=False, dtype=None,
force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
else:
[categories, user_splits, x_clean, y_clean, y_others,
cat_others, sw_clean, sw_others, sorted_idx
] = preprocessing_user_splits_categorical(
self.user_splits, x_clean, y_clean, sw_clean)
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
[splits, n_records, sums, ssums, stds, min_t, max_t,
n_zeros] = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, x_special,
y_special, y_others, sw_clean, sw_missing, sw_special,
sw_others)
else:
[splits, n_records, sums, ssums, stds, min_t, max_t,
n_zeros] = self._fit_prebinning(
x_clean, y_clean, y_missing, x_special, y_special, y_others,
None, sw_clean, sw_missing, sw_special, sw_others)
self._n_prebins = len(n_records)
self._categories = categories
self._cat_others = cat_others
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_records, sums, ssums, stds)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
n_records = np.sum(sw_clean)
sw_y_clean = sw_clean * y_clean
sums = np.sum(sw_y_clean)
ssums = np.sum(sw_y_clean ** 2)
n_zeros = np.count_nonzero(sw_y_clean == 0)
stds = np.std(sw_y_clean)
min_t = np.min(sw_y_clean)
max_t = np.max(sw_y_clean)
[self._n_records, self._sums, self._stds, self._min_target,
self._max_target, self._n_zeros] = continuous_bin_info(
self._solution, n_records, sums, ssums, stds, min_t, max_t,
n_zeros, self._n_records_missing, self._sum_missing,
self._std_missing, self._min_target_missing,
self._max_target_missing, self._n_zeros_missing,
self._n_records_special, self._sum_special, self._std_special,
self._min_target_special, self._max_target_special,
self._n_zeros_special, self._n_records_cat_others,
self._sum_cat_others, self._std_cat_others,
self._min_target_others, self._max_target_others,
self._n_zeros_others, self._cat_others)
if self.dtype == "numerical":
min_x = x_clean.min()
max_x = x_clean.max()
else:
min_x = None
max_x = None
self._binning_table = ContinuousBinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_records, self._sums, self._stds, self._min_target,
self._max_target, self._n_zeros, min_x, max_x, self._categories,
self._cat_others, self.user_splits)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _fit_optimizer(self, splits, n_records, sums, ssums, stds):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if len(n_records) <= 1:
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits)).astype(bool)
if self.verbose:
logger.warning("Optimizer: {} bins after pre-binning."
.format(len(n_records)))
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Monotonic trend
trend_change = None
if self.dtype == "numerical":
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = auto_monotonic_continuous(
n_records, sums, self.monotonic_trend)
if self.monotonic_trend == "auto_heuristic":
if monotonic in ("peak", "valley"):
if monotonic == "peak":
monotonic = "peak_heuristic"
else:
monotonic = "valley_heuristic"
mean = sums / n_records
trend_change = peak_valley_trend_change_heuristic(
mean, monotonic)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(monotonic))
else:
monotonic = self.monotonic_trend
if monotonic in ("peak_heuristic", "valley_heuristic"):
mean = sums / n_records
trend_change = peak_valley_trend_change_heuristic(
mean, monotonic)
else:
monotonic = self.monotonic_trend
if monotonic is not None:
monotonic = "ascending"
if self.verbose:
if monotonic is None:
logger.info(
"Optimizer: monotonic trend not set.")
else:
logger.info("Optimizer: monotonic trend set to {}."
.format(monotonic))
optimizer = ContinuousBinningCP(monotonic, self.min_n_bins,
self.max_n_bins, min_bin_size,
max_bin_size, self.min_mean_diff,
self.max_pvalue,
self.max_pvalue_policy, self.gamma,
self.user_splits_fixed,
self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_records, sums, ssums, trend_change)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
if self.dtype == "categorical" and self.user_splits is not None:
self._splits_optimal = splits[solution]
else:
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
x_special, y_special, y_others, sw_clean,
sw_missing, sw_special, sw_others):
# Compute n_records, sum and std for special, missing and others
[self._n_records_special, self._sum_special, self._n_zeros_special,
self._std_special, self._min_target_special,
self._max_target_special] = target_info_special_continuous(
self.special_codes, x_special, y_special, sw_special)
if len(sw_missing):
y_missing = y_missing * sw_missing
self._n_records_missing = np.sum(sw_missing)
self._sum_missing = np.sum(y_missing)
self._n_zeros_missing = np.count_nonzero(y_missing == 0)
if len(y_missing):
self._std_missing = np.std(y_missing)
self._min_target_missing = np.min(y_missing)
self._max_target_missing = np.max(y_missing)
if len(y_others):
if len(sw_others):
y_others = y_others * sw_others
self._n_records_cat_others = np.sum(sw_others)
self._sum_cat_others = np.sum(y_others)
self._std_cat_others = np.std(y_others)
self._min_target_others = np.min(y_others)
self._max_target_others = np.max(y_others)
self._n_zeros_others = np.count_nonzero(y_others == 0)
n_splits = len(splits_prebinning)
if not n_splits:
return (splits_prebinning, np.array([]), np.array([]),
np.array([]), np.array([]), np.array([]), np.array([]),
np.array([]))
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
(splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros) = self._compute_prebins(splits_prebinning, x, y, sw_clean)
return (splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros)
def _compute_prebins(self, splits_prebinning, x, y, sw):
n_splits = len(splits_prebinning)
if not n_splits:
# Return empty arrays for all eight outputs expected by the caller
# (_prebinning_refinement unpacks an 8-tuple).
return (splits_prebinning, np.array([]), np.array([]), np.array([]),
np.array([]), np.array([]), np.array([]), np.array([]))
if self.dtype == "categorical" and self.user_splits is not None:
indices = np.digitize(x, splits_prebinning, right=True)
n_bins = n_splits
else:
indices = np.digitize(x, splits_prebinning, right=False)
n_bins = n_splits + 1
n_records = np.empty(n_bins, dtype=np.int64)
sums = np.empty(n_bins)
ssums = np.empty(n_bins)
stds = np.zeros(n_bins)
n_zeros = np.empty(n_bins, dtype=np.int64)
min_t = np.full(n_bins, -np.inf)
max_t = np.full(n_bins, np.inf)
# Compute prebin information
for i in range(n_bins):
mask = (indices == i)
n_records[i] = np.sum(sw[mask])
ymask = sw[mask] * y[mask]
sums[i] = np.sum(ymask)
ssums[i] = np.sum(ymask ** 2)
n_zeros[i] = np.count_nonzero(ymask == 0)
if len(ymask):
stds[i] = np.std(ymask)
min_t[i] = np.min(ymask)
max_t[i] = np.max(ymask)
mask_remove = (n_records == 0)
if np.any(mask_remove):
self._n_refinements += 1
if (self.dtype == "categorical" and
self.user_splits is not None):
mask_splits = mask_remove
else:
mask_splits = np.concatenate([
mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
if self.user_splits_fixed is not None:
user_splits_fixed = np.asarray(self.user_splits_fixed)
user_splits = np.asarray(self.user_splits)
fixed_remove = user_splits_fixed & mask_splits
if any(fixed_remove):
raise ValueError(
"Fixed user_splits {} are removed "
"because produce pure prebins. Provide "
"different splits to be fixed."
.format(user_splits[fixed_remove]))
# Update boolean array of fixed user splits.
self.user_splits_fixed = user_splits_fixed[~mask_splits]
self.user_splits = user_splits[~mask_splits]
splits = splits_prebinning[~mask_splits]
if self.verbose:
logger.info("Pre-binning: number prebins removed: {}"
.format(np.count_nonzero(mask_remove)))
(splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros) = self._compute_prebins(splits, x, y, sw)
return (splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros)
@property
def binning_table(self):
"""Return an instantiated binning table. Please refer to
:ref:`Binning table: continuous target`.
Returns
-------
binning_table : ContinuousBinningTable.
"""
self._check_is_fitted()
return self._binning_table
def to_json(self, path):
"""
Save the optimal bins and/or split points and the transformation,
depending on the target type.
Parameters
----------
path : str
The path where the JSON file will be saved.
"""
if path is None:
raise ValueError('Specify the path for the json file.')
table = self.binning_table
opt_bin_dict = dict()
opt_bin_dict['name'] = table.name
opt_bin_dict['dtype'] = table.dtype
opt_bin_dict['special_codes'] = table.special_codes
if table.dtype == 'numerical':
opt_bin_dict['splits'] = table.splits.tolist()
elif table.dtype == 'categorical':
opt_bin_dict['splits'] = [split.tolist() for split in table.splits]
opt_bin_dict['n_records'] = table.n_records.tolist()
opt_bin_dict['sums'] = table.sums.tolist()
opt_bin_dict['stds'] = table.stds.tolist()
opt_bin_dict['min_target'] = table.min_target.tolist()
opt_bin_dict['max_target'] = table.max_target.tolist()
opt_bin_dict['n_zeros'] = table.n_zeros.tolist()
opt_bin_dict['min_x'] = table.min_x
opt_bin_dict['max_x'] = table.max_x
opt_bin_dict['categories'] = table.categories
opt_bin_dict['cat_others'] = table.cat_others
opt_bin_dict['user_splits'] = table.user_splits
with open(path, "w") as write_file:
json.dump(opt_bin_dict, write_file)
def read_json(self, path):
"""
Read a JSON file containing split points and set them as the new
split points.
Parameters
----------
path : str
The path of the JSON file to read.
"""
if path is None:
raise ValueError('Specify the path for the json file.')
self._is_fitted = True
with open(path, "r") as read_file:
cont_table_attr = json.load(read_file)
for key in cont_table_attr.keys():
if isinstance(cont_table_attr[key], list):
cont_table_attr[key] = np.array(cont_table_attr[key])
self._binning_table = ContinuousBinningTable(**cont_table_attr)
| (name='', dtype='numerical', prebinning_method='cart', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend='auto', min_mean_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', gamma=0, outlier_detector=None, outlier_params=None, cat_cutoff=None, cat_unknown=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, time_limit=100, verbose=False, **prebinning_kwargs) |
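A minimal end-to-end sketch for ContinuousOptimalBinning on synthetic data
(the variable name and JSON path are illustrative; the top-level import is
the usual ``optbinning`` entry point):

import numpy as np
from optbinning import ContinuousOptimalBinning

# Synthetic data: x drives a noisy continuous target y.
rng = np.random.default_rng(0)
x = rng.uniform(0, 10, size=1_000)
y = 2 * x + rng.normal(scale=1.0, size=1_000)

optb = ContinuousOptimalBinning(name="x", dtype="numerical")
optb.fit(x, y)

x_mean = optb.transform(x, metric="mean")  # per-bin mean target
x_bins = optb.transform(x, metric="bins")  # bin interval labels

optb.to_json("cont_binning.json")  # persist splits and binning table

# A fresh instance can restore the binning table from the JSON file.
optb2 = ContinuousOptimalBinning()
optb2.read_json("cont_binning.json")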
4,586 | optbinning.binning.continuous_binning | __init__ | null | def __init__(self, name="", dtype="numerical", prebinning_method="cart",
max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None,
max_n_bins=None, min_bin_size=None, max_bin_size=None,
monotonic_trend="auto", min_mean_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", gamma=0,
outlier_detector=None, outlier_params=None, cat_cutoff=None,
cat_unknown=None, user_splits=None, user_splits_fixed=None,
special_codes=None, split_digits=None, time_limit=100,
verbose=False, **prebinning_kwargs):
self.name = name
self.dtype = dtype
self.prebinning_method = prebinning_method
self.solver = "cp"
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend = monotonic_trend
self.min_mean_diff = min_mean_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.gamma = gamma
self.outlier_detector = outlier_detector
self.outlier_params = outlier_params
self.cat_cutoff = cat_cutoff
self.cat_unknown = cat_unknown
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.time_limit = time_limit
self.verbose = verbose
self.prebinning_kwargs = prebinning_kwargs
# auxiliary
self._categories = None
self._cat_others = None
self._n_records = None
self._sums = None
self._stds = None
self._min_target = None
self._max_target = None
self._n_zeros = None
self._n_records_cat_others = None
self._n_records_missing = None
self._n_records_special = None
self._sum_cat_others = None
self._sum_special = None
self._sum_missing = None
self._std_cat_others = None
self._std_special = None
self._std_missing = None
self._min_target_missing = None
self._min_target_special = None
self._min_target_others = None
self._max_target_missing = None
self._max_target_special = None
self._max_target_others = None
self._n_zeros_missing = None
self._n_zeros_special = None
self._n_zeros_others = None
self._problem_type = "regression"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
| (self, name='', dtype='numerical', prebinning_method='cart', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend='auto', min_mean_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', gamma=0, outlier_detector=None, outlier_params=None, cat_cutoff=None, cat_unknown=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, time_limit=100, verbose=False, **prebinning_kwargs) |
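
A minimal usage sketch for this constructor, assuming the public import path `from optbinning import ContinuousOptimalBinning` and synthetic data:

# Sketch: instantiate, fit and inspect a continuous optimal binning.
# The data here are synthetic and purely illustrative.
import numpy as np
from optbinning import ContinuousOptimalBinning

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, size=1000)           # numerical feature
y = 2.0 * x + rng.normal(0, 1, size=1000)   # continuous target

optb = ContinuousOptimalBinning(name="x", dtype="numerical",
                                max_n_prebins=20, monotonic_trend="auto")
optb.fit(x, y)
print(optb.binning_table.build())  # per-bin counts, means and stds
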
4,593 | optbinning.binning.continuous_binning | _compute_prebins | null | def _compute_prebins(self, splits_prebinning, x, y, sw):
n_splits = len(splits_prebinning)
if not n_splits:
return (splits_prebinning, np.array([]), np.array([]),
np.array([]), np.array([]), np.array([]), np.array([]),
np.array([]))
if self.dtype == "categorical" and self.user_splits is not None:
indices = np.digitize(x, splits_prebinning, right=True)
n_bins = n_splits
else:
indices = np.digitize(x, splits_prebinning, right=False)
n_bins = n_splits + 1
n_records = np.empty(n_bins, dtype=np.int64)
sums = np.empty(n_bins)
ssums = np.empty(n_bins)
stds = np.zeros(n_bins)
n_zeros = np.empty(n_bins, dtype=np.int64)
min_t = np.full(n_bins, -np.inf)
max_t = np.full(n_bins, np.inf)
# Compute prebin information
for i in range(n_bins):
mask = (indices == i)
n_records[i] = np.sum(sw[mask])
ymask = sw[mask] * y[mask]
sums[i] = np.sum(ymask)
ssums[i] = np.sum(ymask ** 2)
n_zeros[i] = np.count_nonzero(ymask == 0)
if len(ymask):
stds[i] = np.std(ymask)
min_t[i] = np.min(ymask)
max_t[i] = np.max(ymask)
mask_remove = (n_records == 0)
if np.any(mask_remove):
self._n_refinements += 1
if (self.dtype == "categorical" and
self.user_splits is not None):
mask_splits = mask_remove
else:
mask_splits = np.concatenate([
mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
if self.user_splits_fixed is not None:
user_splits_fixed = np.asarray(self.user_splits_fixed)
user_splits = np.asarray(self.user_splits)
fixed_remove = user_splits_fixed & mask_splits
if any(fixed_remove):
raise ValueError(
"Fixed user_splits {} are removed "
"because produce pure prebins. Provide "
"different splits to be fixed."
.format(user_splits[fixed_remove]))
# Update boolean array of fixed user splits.
self.user_splits_fixed = user_splits_fixed[~mask_splits]
self.user_splits = user_splits[~mask_splits]
splits = splits_prebinning[~mask_splits]
if self.verbose:
logger.info("Pre-binning: number prebins removed: {}"
.format(np.count_nonzero(mask_remove)))
(splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros) = self._compute_prebins(splits, x, y, sw)
return (splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros)
| (self, splits_prebinning, x, y, sw) |
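
The per-prebin statistics above reduce to a digitize-and-accumulate pass. A standalone sketch with hypothetical data:

# Assign each sample to a prebin with np.digitize, then accumulate the
# weighted count, sum and sum of squares per bin, mirroring the loop above.
import numpy as np

x = np.array([0.5, 1.5, 2.5, 3.5, 9.0])
y = np.array([1.0, 0.0, 2.0, 3.0, 5.0])
sw = np.ones_like(y)           # unit sample weights
splits = np.array([1.0, 3.0])  # two split points -> three prebins

indices = np.digitize(x, splits, right=False)
for i in range(len(splits) + 1):
    mask = indices == i
    ym = sw[mask] * y[mask]
    print(i, sw[mask].sum(), ym.sum(), (ym ** 2).sum())
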
4,594 | optbinning.binning.continuous_binning | _fit | null | def _fit(self, x, y, sample_weight, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
y_others, categories, cat_others, sw_clean, sw_missing, sw_special,
sw_others] = split_data(
self.dtype, x, y, self.special_codes, self.cat_cutoff,
self.user_splits, check_input, self.outlier_detector,
self.outlier_params, None, None, None, sample_weight)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples - (n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
if self.dtype == "categorical":
n_categories = len(categories)
n_categories_others = len(cat_others)
n_others = len(y_others)
logger.info("Pre-processing: number of others samples: {}"
.format(n_others))
logger.info("Pre-processing: number of categories: {}"
.format(n_categories))
logger.info("Pre-processing: number of categories others: {}"
.format(n_categories_others))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
n_splits = len(self.user_splits)
if self.verbose:
logger.info("Pre-binning: user splits supplied: {}"
.format(n_splits))
if not n_splits:
splits = self.user_splits
# Initialize all prebin statistics to avoid undefined names below.
n_records = np.array([])
sums = np.array([])
ssums = np.array([])
stds = np.array([])
min_t = np.array([])
max_t = np.array([])
n_zeros = np.array([])
else:
if self.dtype == "numerical":
user_splits = check_array(
self.user_splits, ensure_2d=False, dtype=None,
force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
else:
[categories, user_splits, x_clean, y_clean, y_others,
cat_others, sw_clean, sw_others, sorted_idx
] = preprocessing_user_splits_categorical(
self.user_splits, x_clean, y_clean, sw_clean)
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
[splits, n_records, sums, ssums, stds, min_t, max_t,
n_zeros] = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, x_special,
y_special, y_others, sw_clean, sw_missing, sw_special,
sw_others)
else:
[splits, n_records, sums, ssums, stds, min_t, max_t,
n_zeros] = self._fit_prebinning(
x_clean, y_clean, y_missing, x_special, y_special, y_others,
None, sw_clean, sw_missing, sw_special, sw_others)
self._n_prebins = len(n_records)
self._categories = categories
self._cat_others = cat_others
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_records, sums, ssums, stds)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
n_records = np.sum(sw_clean)
sw_y_clean = sw_clean * y_clean
sums = np.sum(sw_y_clean)
ssums = np.sum(sw_y_clean ** 2)
n_zeros = np.count_nonzero(sw_y_clean == 0)
stds = np.std(sw_y_clean)
min_t = np.min(sw_y_clean)
max_t = np.max(sw_y_clean)
[self._n_records, self._sums, self._stds, self._min_target,
self._max_target, self._n_zeros] = continuous_bin_info(
self._solution, n_records, sums, ssums, stds, min_t, max_t,
n_zeros, self._n_records_missing, self._sum_missing,
self._std_missing, self._min_target_missing,
self._max_target_missing, self._n_zeros_missing,
self._n_records_special, self._sum_special, self._std_special,
self._min_target_special, self._max_target_special,
self._n_zeros_special, self._n_records_cat_others,
self._sum_cat_others, self._std_cat_others,
self._min_target_others, self._max_target_others,
self._n_zeros_others, self._cat_others)
if self.dtype == "numerical":
min_x = x_clean.min()
max_x = x_clean.max()
else:
min_x = None
max_x = None
self._binning_table = ContinuousBinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_records, self._sums, self._stds, self._min_target,
self._max_target, self._n_zeros, min_x, max_x, self._categories,
self._cat_others, self.user_splits)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, x, y, sample_weight, check_input) |
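
Sample weights enter both the CART pre-binning and the per-bin statistics in the pipeline above. A hedged sketch with synthetic data:

# Sketch: weighted fit. check_input=True enables input validation.
import numpy as np
from optbinning import ContinuousOptimalBinning

rng = np.random.default_rng(1)
x = rng.normal(size=500)
y = x ** 2 + rng.normal(0, 0.1, size=500)
w = rng.uniform(0.5, 2.0, size=500)  # per-sample weights

optb = ContinuousOptimalBinning(name="x")
optb.fit(x, y, sample_weight=w, check_input=True)
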
4,595 | optbinning.binning.continuous_binning | _fit_optimizer | null | def _fit_optimizer(self, splits, n_records, sums, ssums, stds):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if len(n_records) <= 1:
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits)).astype(bool)
if self.verbose:
logger.warning("Optimizer: {} bins after pre-binning."
.format(len(n_records)))
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Monotonic trend
trend_change = None
if self.dtype == "numerical":
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = auto_monotonic_continuous(
n_records, sums, self.monotonic_trend)
if self.monotonic_trend == "auto_heuristic":
if monotonic in ("peak", "valley"):
if monotonic == "peak":
monotonic = "peak_heuristic"
else:
monotonic = "valley_heuristic"
mean = sums / n_records
trend_change = peak_valley_trend_change_heuristic(
mean, monotonic)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(monotonic))
else:
monotonic = self.monotonic_trend
if monotonic in ("peak_heuristic", "valley_heuristic"):
mean = sums / n_records
trend_change = peak_valley_trend_change_heuristic(
mean, monotonic)
else:
monotonic = self.monotonic_trend
if monotonic is not None:
monotonic = "ascending"
if self.verbose:
if monotonic is None:
logger.info(
"Optimizer: monotonic trend not set.")
else:
logger.info("Optimizer: monotonic trend set to {}."
.format(monotonic))
optimizer = ContinuousBinningCP(monotonic, self.min_n_bins,
self.max_n_bins, min_bin_size,
max_bin_size, self.min_mean_diff,
self.max_pvalue,
self.max_pvalue_policy, self.gamma,
self.user_splits_fixed,
self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_records, sums, ssums, trend_change)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
if self.dtype == "categorical" and self.user_splits is not None:
self._splits_optimal = splits[solution]
else:
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
| (self, splits, n_records, sums, ssums, stds) |
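
On a non-monotonic relationship, the "auto_heuristic" mode above may resolve to a peak or valley trend with a change point. A sketch on synthetic peak-shaped data (the selected trend is data-dependent, not guaranteed):

# Sketch: peak-shaped target; auto_heuristic may pick "peak_heuristic".
import numpy as np
from optbinning import ContinuousOptimalBinning

rng = np.random.default_rng(2)
x = rng.uniform(-3, 3, size=2000)
y = -(x ** 2) + rng.normal(0, 0.2, size=2000)  # peak at x = 0

optb = ContinuousOptimalBinning(name="x", monotonic_trend="auto_heuristic")
optb.fit(x, y)
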
4,596 | optbinning.binning.binning | _fit_prebinning | null | def _fit_prebinning(self, x, y, y_missing, x_special, y_special, y_others,
class_weight=None, sw_clean=None, sw_missing=None,
sw_special=None, sw_others=None):
min_bin_size = int(np.ceil(self.min_prebin_size * self._n_samples))
prebinning = PreBinning(method=self.prebinning_method,
n_bins=self.max_n_prebins,
min_bin_size=min_bin_size,
problem_type=self._problem_type,
class_weight=class_weight,
**self.prebinning_kwargs
).fit(x, y, sw_clean)
return self._prebinning_refinement(prebinning.splits, x, y, y_missing,
x_special, y_special, y_others,
sw_clean, sw_missing, sw_special,
sw_others)
| (self, x, y, y_missing, x_special, y_special, y_others, class_weight=None, sw_clean=None, sw_missing=None, sw_special=None, sw_others=None) |
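
CART pre-binning amounts to fitting a shallow regression tree and reading its split thresholds as candidate prebin edges. A hedged sketch of that idea (the PreBinning helper's exact internals may differ):

# Fit a depth-limited regression tree; internal-node thresholds become
# candidate split points. Leaves are marked with feature == -2 in sklearn.
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(3)
x = rng.uniform(0, 1, size=1000)
y = np.sin(2 * np.pi * x) + rng.normal(0, 0.1, size=1000)

tree = DecisionTreeRegressor(max_leaf_nodes=20, min_samples_leaf=50)
tree.fit(x.reshape(-1, 1), y)

splits = np.sort(tree.tree_.threshold[tree.tree_.feature >= 0])
print(splits)  # candidate prebin edges
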
4,601 | optbinning.binning.continuous_binning | _prebinning_refinement | null | def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
x_special, y_special, y_others, sw_clean,
sw_missing, sw_special, sw_others):
# Compute n_records, sum and std for special, missing and others
[self._n_records_special, self._sum_special, self._n_zeros_special,
self._std_special, self._min_target_special,
self._max_target_special] = target_info_special_continuous(
self.special_codes, x_special, y_special, sw_special)
if len(sw_missing):
y_missing = y_missing * sw_missing
self._n_records_missing = np.sum(sw_missing)
self._sum_missing = np.sum(y_missing)
self._n_zeros_missing = np.count_nonzero(y_missing == 0)
if len(y_missing):
self._std_missing = np.std(y_missing)
self._min_target_missing = np.min(y_missing)
self._max_target_missing = np.max(y_missing)
if len(y_others):
if len(sw_others):
y_others = y_others * sw_others
self._n_records_cat_others = np.sum(sw_others)
self._sum_cat_others = np.sum(y_others)
self._std_cat_others = np.std(y_others)
self._min_target_others = np.min(y_others)
self._max_target_others = np.max(y_others)
self._n_zeros_others = np.count_nonzero(y_others == 0)
n_splits = len(splits_prebinning)
if not n_splits:
return (splits_prebinning, np.array([]), np.array([]),
np.array([]), np.array([]), np.array([]), np.array([]),
np.array([]))
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
(splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros) = self._compute_prebins(splits_prebinning, x, y, sw_clean)
return (splits_prebinning, n_records, sums, ssums, stds, min_t, max_t,
n_zeros)
| (self, splits_prebinning, x, y, y_missing, x_special, y_special, y_others, sw_clean, sw_missing, sw_special, sw_others) |
4,606 | optbinning.binning.continuous_binning | fit | Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : ContinuousOptimalBinning
Fitted optimal binning.
| def fit(self, x, y, sample_weight=None, check_input=False):
"""Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : ContinuousOptimalBinning
Fitted optimal binning.
"""
return self._fit(x, y, sample_weight, check_input)
| (self, x, y, sample_weight=None, check_input=False) |
4,607 | optbinning.binning.continuous_binning | fit_transform | Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def fit_transform(self, x, y, sample_weight=None, metric="mean",
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, sample_weight, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
| (self, x, y, sample_weight=None, metric='mean', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
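
A usage sketch for fit_transform with metric="indices", on synthetic data:

# Each sample is mapped to the index of its optimal bin.
import numpy as np
from optbinning import ContinuousOptimalBinning

rng = np.random.default_rng(4)
x = rng.uniform(0, 100, size=300)
y = 0.5 * x + rng.normal(0, 5, size=300)

optb = ContinuousOptimalBinning(name="x")
bin_indices = optb.fit_transform(x, y, metric="indices")
print(bin_indices[:10])
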
4,610 | optbinning.binning.binning | information | Print overview information about the options settings, problem
statistics, and the solution of the computation.
Parameters
----------
print_level : int (default=1)
Level of details.
| def information(self, print_level=1):
"""Print overview information about the options settings, problem
statistics, and the solution of the computation.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_fitted()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
binning_type = self.__class__.__name__.lower()
if self._optimizer is not None:
solver = self._optimizer
time_solver = self._time_solver
else:
solver = None
time_solver = 0
dict_user_options = self.get_params()
print_binning_information(binning_type, print_level, self.name,
self._status, self.solver, solver,
self._time_total, self._time_preprocessing,
self._time_prebinning, time_solver,
self._time_optimizer,
self._time_postprocessing, self._n_prebins,
self._n_refinements, dict_user_options)
| (self, print_level=1) |
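
A short sketch: after fitting, information() reports option settings, problem statistics and timings; print_level=2 adds solver details.

import numpy as np
from optbinning import ContinuousOptimalBinning

rng = np.random.default_rng(8)
x = rng.uniform(size=200)
y = x + rng.normal(0, 0.05, size=200)

optb = ContinuousOptimalBinning(name="x").fit(x, y)
optb.information(print_level=2)
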
4,611 | optbinning.binning.continuous_binning | read_json |
Read a JSON file containing split points and set them as the new split
points.
Parameters
----------
path : str
The path of the JSON file.
| def read_json(self, path):
"""
Read a JSON file containing split points and set them as the new split
points.
Parameters
----------
path : str
The path of the JSON file.
"""
if path is None:
raise ValueError('Specify the path for the json file.')
self._is_fitted = True
with open(path, "r") as read_file:
cont_table_attr = json.load(read_file)
for key in cont_table_attr.keys():
if isinstance(cont_table_attr[key], list):
cont_table_attr[key] = np.array(cont_table_attr[key])
self._binning_table = ContinuousBinningTable(**cont_table_attr)
| (self, path) |
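
A round-trip sketch with to_json/read_json: persist the fitted binning table and restore it in a fresh estimator. Note that, per the code above, read_json restores the binning table (not the transform state); the file path is illustrative.

import numpy as np
from optbinning import ContinuousOptimalBinning

rng = np.random.default_rng(5)
x = rng.uniform(0, 10, size=500)
y = x + rng.normal(0, 1, size=500)

optb = ContinuousOptimalBinning(name="x").fit(x, y)
optb.to_json("continuous_binning.json")

restored = ContinuousOptimalBinning(name="x")
restored.read_json("continuous_binning.json")
print(restored.binning_table.build())
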
4,612 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``fit``.
sample_weight : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``sample_weight`` parameter in ``fit``.
x : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``x`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.continuous_binning.ContinuousOptimalBinning, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', sample_weight: Union[bool, NoneType, str] = '$UNCHANGED$', x: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.continuous_binning.ContinuousOptimalBinning |
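
A hedged sketch of the metadata-routing request above (requires scikit-learn >= 1.3; routing must be enabled globally first):

import sklearn
from optbinning import ContinuousOptimalBinning

sklearn.set_config(enable_metadata_routing=True)

# Request that sample_weight be routed to fit when this estimator is
# used inside a meta-estimator such as a Pipeline.
optb = ContinuousOptimalBinning(name="x").set_fit_request(sample_weight=True)
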
4,614 | sklearn.utils._metadata_requests | set_transform_request | Request metadata passed to the ``transform`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``transform`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``transform``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``transform``.
metric : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric`` parameter in ``transform``.
metric_missing : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_missing`` parameter in ``transform``.
metric_special : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_special`` parameter in ``transform``.
show_digits : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``show_digits`` parameter in ``transform``.
x : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``x`` parameter in ``transform``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.continuous_binning.ContinuousOptimalBinning, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', metric: Union[bool, NoneType, str] = '$UNCHANGED$', metric_missing: Union[bool, NoneType, str] = '$UNCHANGED$', metric_special: Union[bool, NoneType, str] = '$UNCHANGED$', show_digits: Union[bool, NoneType, str] = '$UNCHANGED$', x: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.continuous_binning.ContinuousOptimalBinning |
4,615 | optbinning.binning.continuous_binning | to_json |
Save optimal bins and/or split points and transformation depending on
the target type.
Parameters
----------
path : str
The path where the JSON file will be saved.
| def to_json(self, path):
"""
Save optimal bins and/or split points and transformation depending on
the target type.
Parameters
----------
path : str
The path where the JSON file will be saved.
"""
if path is None:
raise ValueError('Specify the path for the json file.')
table = self.binning_table
opt_bin_dict = dict()
opt_bin_dict['name'] = table.name
opt_bin_dict['dtype'] = table.dtype
opt_bin_dict['special_codes'] = table.special_codes
if table.dtype == 'numerical':
opt_bin_dict['splits'] = table.splits.tolist()
elif table.dtype == 'categorical':
opt_bin_dict['splits'] = [split.tolist() for split in table.splits]
opt_bin_dict['n_records'] = table.n_records.tolist()
opt_bin_dict['sums'] = table.sums.tolist()
opt_bin_dict['stds'] = table.stds.tolist()
opt_bin_dict['min_target'] = table.min_target.tolist()
opt_bin_dict['max_target'] = table.max_target.tolist()
opt_bin_dict['n_zeros'] = table.n_zeros.tolist()
opt_bin_dict['min_x'] = table.min_x
opt_bin_dict['max_x'] = table.max_x
opt_bin_dict['categories'] = table.categories
opt_bin_dict['cat_others'] = table.cat_others
opt_bin_dict['user_splits'] = table.user_splits
with open(path, "w") as write_file:
json.dump(opt_bin_dict, write_file)
| (self, path) |
4,616 | optbinning.binning.continuous_binning | transform | Transform given data to mean using bins from the fitted
optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero mean.
| def transform(self, x, metric="mean", metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to mean using bins from the fitted
optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero mean.
"""
self._check_is_fitted()
return transform_continuous_target(self._splits_optimal, self.dtype,
x, self._n_records, self._sums,
self.special_codes,
self._categories, self._cat_others,
self.cat_unknown, metric,
metric_special, metric_missing,
self.user_splits, show_digits,
check_input)
| (self, x, metric='mean', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
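
A usage sketch for transform: map new values to bin intervals with metric="bins", and send missing values to the empirical mean. A few NaNs are placed in the training data so the empirical missing metric is defined.

import numpy as np
from optbinning import ContinuousOptimalBinning

rng = np.random.default_rng(6)
x = rng.uniform(0, 10, size=400)
y = 3.0 * x + rng.normal(0, 1, size=400)
x[:5] = np.nan  # some missing values in training

optb = ContinuousOptimalBinning(name="x").fit(x, y)

x_new = np.array([0.2, 5.0, 9.9, np.nan])
print(optb.transform(x_new, metric="bins", show_digits=3))
print(optb.transform(x_new, metric="mean", metric_missing="empirical"))
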
4,617 | optbinning.binning.multidimensional.continuous_binning_2d | ContinuousOptimalBinning2D | Optimal binning of two numerical variables with respect to a continuous
target.
Parameters
----------
name_x : str, optional (default="")
The name of variable x.
name_y : str, optional (default="")
The name of variable y.
dtype_x : str, optional (default="numerical")
The data type of variable x. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
dtype_y : str, optional (default="numerical")
The data type of variable y. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "mdlp" for Minimum Description Length Principle (MDLP),
"quantile" to generate prebins with approximately same frequency and
"uniform" to generate prebins with equal width. Method "cart" uses
`sklearn.tree.DecisionTreeRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeRegressor.html>`_.
strategy: str, optional (default="grid")
The strategy used to create the initial prebinning 2D after computing
prebinning splits on the x and y axis. The strategy "grid" creates a
prebinning 2D with n_prebins_x times n_prebins_y elements. The strategy
"cart" (experimental) reduces the number of elements by pruning. The
latter is recommended when the number of prebins is large.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver, and "cp" to
choose a constrained programming solver.
max_n_prebins_x : int (default=5)
The maximum number of bins on variable x after pre-binning (prebins).
max_n_prebins_y : int (default=5)
The maximum number of bins on variable y after pre-binning (prebins).
min_prebin_size_x : float (default=0.05)
The fraction of minimum number of records for each prebin on variable
x.
min_prebin_size_y : float (default=0.05)
The fraction of minimum number of records for each prebin on variable
y.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend_x : str or None, optional (default=None)
The **mean** monotonic trend on the x axis. Supported trends are
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
monotonic_trend_y : str or None, optional (default=None)
The **mean** monotonic trend on the y axis. Supported trends are
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_mean_diff_x : float, optional (default=0)
The minimum mean difference between consecutive bins on the x axis.
min_mean_diff_y : float, optional (default=0)
The minimum mean difference between consecutive bins on the y axis.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization.
special_codes_x : array-like or None, optional (default=None)
List of special codes for the variable x. Use special codes to specify
the data values that must be treated separately.
special_codes_y : array-like or None, optional (default=None)
List of special codes for the variable y. Use special codes to specify
the data values that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
n_jobs : int or None, optional (default=None)
Number of cores to run in parallel while binning variables.
``None`` means 1 core. ``-1`` means using all processors.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
| class ContinuousOptimalBinning2D(OptimalBinning2D):
"""Optimal binning of two numerical variables with respect to a continuous
target.
Parameters
----------
name_x : str, optional (default="")
The name of variable x.
name_y : str, optional (default="")
The name of variable y.
dtype_x : str, optional (default="numerical")
The data type of variable x. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
dtype_y : str, optional (default="numerical")
The data type of variable y. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "mdlp" for Minimum Description Length Principle (MDLP),
"quantile" to generate prebins with approximately same frequency and
"uniform" to generate prebins with equal width. Method "cart" uses
`sklearn.tree.DecisionTreeRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeRegressor.html>`_.
strategy: str, optional (default="grid")
The strategy used to create the initial prebinning 2D after computing
prebinning splits on the x and y axis. The strategy "grid" creates a
prebinning 2D with n_prebins_x times n_prebins_y elements. The strategy
"cart" (experimental) reduces the number of elements by pruning. The
latter is recommended when the number of prebins is large.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver, and "cp" to
choose a constrained programming solver.
max_n_prebins_x : int (default=5)
The maximum number of bins on variable x after pre-binning (prebins).
max_n_prebins_y : int (default=5)
The maximum number of bins on variable y after pre-binning (prebins).
min_prebin_size_x : float (default=0.05)
The fraction of minimum number of records for each prebin on variable
x.
min_prebin_size_y : float (default=0.05)
The fraction of minimum number of records for each prebin on variable
y.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend_x : str or None, optional (default=None)
The **mean** monotonic trend on the x axis. Supported trends are
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
monotonic_trend_y : str or None, optional (default=None)
The **mean** monotonic trend on the y axis. Supported trends are
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_mean_diff_x : float, optional (default=0)
The minimum mean difference between consecutive bins on the x axis.
min_mean_diff_y : float, optional (default=0)
The minimum mean difference between consecutive bins on the y axis.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization.
special_codes_x : array-like or None, optional (default=None)
List of special codes for the variable x. Use special codes to specify
the data values that must be treated separately.
special_codes_y : array-like or None, optional (default=None)
List of special codes for the variable y. Use special codes to specify
the data values that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
n_jobs : int or None, optional (default=None)
Number of cores to run in parallel while binning variables.
``None`` means 1 core. ``-1`` means using all processors.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
"""
def __init__(self, name_x="", name_y="", dtype_x="numerical",
dtype_y="numerical", prebinning_method="cart",
strategy="grid", solver="cp", max_n_prebins_x=5,
max_n_prebins_y=5, min_prebin_size_x=0.05,
min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, monotonic_trend_x=None,
monotonic_trend_y=None, min_mean_diff_x=0, min_mean_diff_y=0,
gamma=0, special_codes_x=None, special_codes_y=None,
split_digits=None, n_jobs=1, time_limit=100, verbose=False):
self.name_x = name_x
self.name_y = name_y
self.dtype_x = dtype_x
self.dtype_y = dtype_y
self.prebinning_method = prebinning_method
self.strategy = strategy
self.solver = solver
self.max_n_prebins_x = max_n_prebins_x
self.max_n_prebins_y = max_n_prebins_y
self.min_prebin_size_x = min_prebin_size_x
self.min_prebin_size_y = min_prebin_size_y
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend_x = monotonic_trend_x
self.monotonic_trend_y = monotonic_trend_y
self.min_mean_diff_x = min_mean_diff_x
self.min_mean_diff_y = min_mean_diff_y
self.gamma = gamma
self.special_codes_x = special_codes_x
self.special_codes_y = special_codes_y
self.split_digits = split_digits
self.n_jobs = n_jobs
self.time_limit = time_limit
self.verbose = verbose
# auxiliary
self._categories_x = None
self._categories_y = None
self._n_records_special = None
self._n_records_missing = None
self._sum_special = None
self._sum_missing = None
self._std_special = None
self._std_missing = None
self._problem_type = "regression"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._solution = None
self._splits_x_optimal = None
self._splits_y_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
def fit(self, x, y, z, check_input=False):
"""Fit the optimal binning 2D according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : ContinuousOptimalBinning2D
Fitted optimal binning 2D.
"""
return self._fit(x, y, z, check_input)
def fit_transform(self, x, y, z, metric="mean", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Fit the optimal binning 2D according to the given training data,
then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
metric : str (default="mean")
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, z, check_input).transform(
x, y, metric, metric_special, metric_missing, show_digits,
check_input)
def transform(self, x, y, metric="mean", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to mean using bins from the fitted optimal
binning 2D.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
metric : str (default="mean")
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_continuous_target(
self.dtype_x, self.dtype_y, self._splits_x_optimal,
self._splits_y_optimal, x, y, self._n_records, self._sums,
self.special_codes_x, self.special_codes_y, self._categories_x,
self._categories_y, metric, metric_special, metric_missing,
show_digits, check_input)
def _fit(self, x, y, z, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, z_clean, x_missing, y_missing, z_missing,
x_special, y_special, z_special,
categories_x, categories_y] = split_data_2d(
self.dtype_x, self.dtype_y, x, y, z, self.special_codes_x,
self.special_codes_y, check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.dtype_x == "categorical":
logger.info("Pre-processing: number of categories in x: {}"
.format(len(categories_x)))
if self.dtype_y == "categorical":
logger.info("Pre-processing: number of categories in y: {}"
.format(len(categories_y)))
if self.verbose:
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
splits_x = self._fit_prebinning(self.dtype_x, x_clean, z_clean,
self.max_n_prebins_x,
self.min_prebin_size_x)
splits_y = self._fit_prebinning(self.dtype_y, y_clean, z_clean,
self.max_n_prebins_y,
self.min_prebin_size_y)
R, S, SS = self._prebinning_matrices(
splits_x, splits_y, x_clean, y_clean, z_clean, x_missing,
y_missing, z_missing, x_special, y_special, z_special)
if self.strategy == "cart":
if self.verbose:
logger.info("Prebinning: applying strategy cart...")
n_splits_x = len(splits_x)
n_splits_y = len(splits_y)
clf_nodes = n_splits_x * n_splits_y
indices_x = np.digitize(x_clean, splits_x, right=False)
n_bins_x = n_splits_x + 1
indices_y = np.digitize(y_clean, splits_y, right=False)
n_bins_y = n_splits_y + 1
xt = np.empty(len(x_clean), dtype=int)
yt = np.empty(len(y_clean), dtype=int)
for i in range(n_bins_x):
xt[(indices_x == i)] = i
for i in range(n_bins_y):
yt[(indices_y == i)] = i
xyt = np.c_[xt, yt]
min_prebin_size = min(self.min_prebin_size_x,
self.min_prebin_size_y) * 0.25
clf = DecisionTreeRegressor(min_samples_leaf=min_prebin_size,
max_leaf_nodes=clf_nodes)
clf.fit(xyt, z_clean)
self._clf = clf
self._categories_x = categories_x
self._categories_y = categories_y
self._time_prebinning = time.perf_counter() - time_prebinning
self._n_prebins = R.size
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
rows, n_records, sums, stds = self._fit_optimizer(
splits_x, splits_y, R, S, SS)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
# Refinements
m, n = R.shape
self._n_refinements = (m * n * (m + 1) * (n + 1)) // 4 - len(rows)
# solution matrices
D = np.empty(m * n, dtype=float)
P = np.empty(m * n, dtype=int)
selected_rows = np.array(rows, dtype=object)[self._solution]
self._selected_rows = selected_rows
self._m, self._n = m, n
n_selected_rows = selected_rows.shape[0] + 2
opt_sums = np.empty(n_selected_rows, dtype=float)
opt_n_records = np.empty(n_selected_rows, dtype=int)
opt_stds = np.zeros(n_selected_rows, dtype=float)
for i, r in enumerate(selected_rows):
_n_records = n_records[self._solution][i]
_sums = sums[self._solution][i]
_mean = _sums / _n_records
_stds = stds[self._solution][i]
P[r] = i
D[r] = _mean
opt_sums[i] = _sums
opt_n_records[i] = _n_records
opt_stds[i] = _stds
opt_n_records[-2] = self._n_records_special
opt_sums[-2] = self._sum_special
opt_stds[-2] = self._std_special
opt_n_records[-1] = self._n_records_missing
opt_sums[-1] = self._sum_missing
opt_stds[-1] = self._std_missing
self._sums = opt_sums
self._n_records = opt_n_records
D = D.reshape((m, n))
P = P.reshape((m, n))
# optimal bins
splits_x_optimal, splits_y_optimal = self._splits_xy_optimal(
selected_rows, splits_x, splits_y, P)
self._splits_x_optimal = splits_x_optimal
self._splits_y_optimal = splits_y_optimal
# instantiate binning table
self._binning_table = ContinuousBinningTable2D(
self.name_x, self.name_y, self.dtype_x, self.dtype_y,
splits_x_optimal, splits_y_optimal, m, n, opt_n_records,
opt_sums, opt_stds, D, P, self._categories_x, self._categories_y)
self.name = "-".join((self.name_x, self.name_y))
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _prebinning_matrices(self, splits_x, splits_y, x_clean, y_clean,
z_clean, x_missing, y_missing, z_missing,
x_special, y_special, z_special):
self._n_records_missing = len(z_missing)
self._n_records_special = len(z_special)
self._sum_missing = np.sum(z_missing)
self._sum_special = np.sum(z_special)
if len(z_missing):
self._std_missing = np.std(z_missing)
else:
self._std_missing = 0
if len(z_special):
self._std_special = np.std(z_special)
else:
self._std_special = 0
n_splits_x = len(splits_x)
n_splits_y = len(splits_y)
indices_x = np.digitize(x_clean, splits_x, right=False)
n_bins_x = n_splits_x + 1
indices_y = np.digitize(y_clean, splits_y, right=False)
n_bins_y = n_splits_y + 1
R = np.empty((n_bins_y, n_bins_x), dtype=float)
S = np.empty((n_bins_y, n_bins_x), dtype=float)
SS = np.empty((n_bins_y, n_bins_x), dtype=float)
for i in range(n_bins_y):
mask_y = (indices_y == i)
for j in range(n_bins_x):
mask_x = (indices_x == j)
mask = mask_x & mask_y
zmask = z_clean[mask]
R[i, j] = np.count_nonzero(mask)
S[i, j] = np.sum(zmask)
SS[i, j] = np.sum(zmask ** 2)
return R, S, SS
def _fit_optimizer(self, splits_x, splits_y, R, S, SS):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
# Min/max number of bins (bin size)
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Number of threads
n_jobs = effective_n_jobs(self.n_jobs)
if self.verbose:
logger.info("Optimizer: {} jobs.".format(n_jobs))
if self.monotonic_trend_x is None:
logger.info(
"Optimizer: monotonic trend x not set.")
else:
logger.info("Optimizer: monotonic trend x set to {}."
.format(self.monotonic_trend_x))
if self.monotonic_trend_y is None:
logger.info(
"Optimizer: monotonic trend y not set.")
else:
logger.info("Optimizer: monotonic trend y set to {}."
.format(self.monotonic_trend_y))
if self.solver == "cp":
scale = int(1e6)
optimizer = Binning2DCP(
self.monotonic_trend_x, self.monotonic_trend_y,
self.min_n_bins, self.max_n_bins, self.min_mean_diff_x,
self.min_mean_diff_y, self.gamma, n_jobs, self.time_limit)
elif self.solver == "mip":
scale = None
optimizer = Binning2DMIP(
self.monotonic_trend_x, self.monotonic_trend_y,
self.min_n_bins, self.max_n_bins, self.min_mean_diff_x,
self.min_mean_diff_y, self.gamma, n_jobs, self.time_limit)
if self.verbose:
logger.info("Optimizer: model data...")
time_model_data = time.perf_counter()
if self.strategy == "cart":
[n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y,
mean, n_records, sums, stds] = continuous_model_data_cart(
self._clf, R, S, SS, self.monotonic_trend_x,
self.monotonic_trend_y, scale, min_bin_size, max_bin_size)
else:
[n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y,
mean, n_records, sums, stds] = continuous_model_data(
R, S, SS, self.monotonic_trend_x, self.monotonic_trend_y,
scale, min_bin_size, max_bin_size)
self._time_model_data = time.perf_counter() - time_model_data
if self.verbose:
logger.info("Optimizer: model data terminated. Time {:.4f}s"
.format(self._time_model_data))
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_grid, n_rectangles, cols, c, d_connected_x,
d_connected_y, mean, n_records)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
self._cols = cols
self._rows = rows
self._c = c
return rows, n_records, sums, stds
| (name_x='', name_y='', dtype_x='numerical', dtype_y='numerical', prebinning_method='cart', strategy='grid', solver='cp', max_n_prebins_x=5, max_n_prebins_y=5, min_prebin_size_x=0.05, min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend_x=None, monotonic_trend_y=None, min_mean_diff_x=0, min_mean_diff_y=0, gamma=0, special_codes_x=None, special_codes_y=None, split_digits=None, n_jobs=1, time_limit=100, verbose=False) |
4,619 | optbinning.binning.multidimensional.continuous_binning_2d | __init__ | null | def __init__(self, name_x="", name_y="", dtype_x="numerical",
dtype_y="numerical", prebinning_method="cart",
strategy="grid", solver="cp", max_n_prebins_x=5,
max_n_prebins_y=5, min_prebin_size_x=0.05,
min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, monotonic_trend_x=None,
monotonic_trend_y=None, min_mean_diff_x=0, min_mean_diff_y=0,
gamma=0, special_codes_x=None, special_codes_y=None,
split_digits=None, n_jobs=1, time_limit=100, verbose=False):
self.name_x = name_x
self.name_y = name_y
self.dtype_x = dtype_x
self.dtype_y = dtype_y
self.prebinning_method = prebinning_method
self.strategy = strategy
self.solver = solver
self.max_n_prebins_x = max_n_prebins_x
self.max_n_prebins_y = max_n_prebins_y
self.min_prebin_size_x = min_prebin_size_x
self.min_prebin_size_y = min_prebin_size_y
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend_x = monotonic_trend_x
self.monotonic_trend_y = monotonic_trend_y
self.min_mean_diff_x = min_mean_diff_x
self.min_mean_diff_y = min_mean_diff_y
self.gamma = gamma
self.special_codes_x = special_codes_x
self.special_codes_y = special_codes_y
self.split_digits = split_digits
self.n_jobs = n_jobs
self.time_limit = time_limit
self.verbose = verbose
# auxiliary
self._categories_x = None
self._categories_y = None
self._n_records_special = None
self._n_records_missing = None
self._sum_special = None
self._sum_missing = None
self._std_special = None
self._std_missing = None
self._problem_type = "regression"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._solution = None
self._splits_x_optimal = None
self._splits_y_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
| (self, name_x='', name_y='', dtype_x='numerical', dtype_y='numerical', prebinning_method='cart', strategy='grid', solver='cp', max_n_prebins_x=5, max_n_prebins_y=5, min_prebin_size_x=0.05, min_prebin_size_y=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend_x=None, monotonic_trend_y=None, min_mean_diff_x=0, min_mean_diff_y=0, gamma=0, special_codes_x=None, special_codes_y=None, split_digits=None, n_jobs=1, time_limit=100, verbose=False) |
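A minimal instantiation sketch for the constructor above, assuming (as in recent optbinning releases) that ContinuousOptimalBinning2D is importable from the package top level. The constructor only stores parameters, following the scikit-learn convention; validation and all computation are deferred to fit.

from optbinning import ContinuousOptimalBinning2D

# No computation happens here; _check_parameters runs inside fit().
optb2d = ContinuousOptimalBinning2D(
    name_x="x", name_y="y",
    monotonic_trend_x="ascending", monotonic_trend_y="descending",
    max_n_prebins_x=5, max_n_prebins_y=5,
    solver="cp", verbose=True)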
4,626 | optbinning.binning.binning | _compute_prebins | null | def _compute_prebins(self, splits_prebinning, x, y0, y1, sw):
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.dtype == "categorical" and self.user_splits is not None:
indices = np.digitize(x, splits_prebinning, right=True)
n_bins = n_splits
else:
indices = np.digitize(x, splits_prebinning, right=False)
n_bins = n_splits + 1
n_nonevent = np.empty(n_bins, dtype=np.int64)
n_event = np.empty(n_bins, dtype=np.int64)
for i in range(n_bins):
mask = (indices == i)
n_nonevent[i] = np.sum(sw[y0 & mask])
n_event[i] = np.sum(sw[y1 & mask])
mask_remove = (n_nonevent == 0) | (n_event == 0)
if np.any(mask_remove):
if self.divergence in ("hellinger", "triangular"):
self._flag_min_n_event_nonevent = True
else:
self._n_refinements += 1
if (self.dtype == "categorical" and
self.user_splits is not None):
mask_splits = mask_remove
else:
mask_splits = np.concatenate([
mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
if self.user_splits_fixed is not None:
user_splits_fixed = np.asarray(self.user_splits_fixed)
user_splits = np.asarray(self.user_splits)
fixed_remove = user_splits_fixed & mask_splits
if any(fixed_remove):
raise ValueError(
"Fixed user_splits {} are removed "
"because produce pure prebins. Provide "
"different splits to be fixed."
.format(user_splits[fixed_remove]))
# Update boolean array of fixed user splits.
self.user_splits_fixed = user_splits_fixed[~mask_splits]
self.user_splits = user_splits[~mask_splits]
splits = splits_prebinning[~mask_splits]
if self.verbose:
logger.info("Pre-binning: number prebins removed: {}"
.format(np.count_nonzero(mask_remove)))
[splits_prebinning, n_nonevent,
n_event] = self._compute_prebins(splits, x, y0, y1, sw)
return splits_prebinning, n_nonevent, n_event
| (self, splits_prebinning, x, y0, y1, sw) |
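A self-contained sketch of the counting step in _compute_prebins: np.digitize assigns each clean sample to a prebin, and weighted event/non-event totals are accumulated per bin; any bin with a zero count triggers a split merge and a recursive recount.

import numpy as np

x = np.array([0.2, 0.5, 1.3, 2.1, 2.9, 3.5])
y = np.array([0, 1, 0, 1, 1, 0])
sw = np.ones_like(x, dtype=float)          # unit sample weights
splits = np.array([1.0, 3.0])              # 2 splits -> 3 prebins

indices = np.digitize(x, splits, right=False)
y0, y1 = (y == 0), (y == 1)
n_bins = len(splits) + 1
n_nonevent = np.array([sw[y0 & (indices == i)].sum() for i in range(n_bins)])
n_event = np.array([sw[y1 & (indices == i)].sum() for i in range(n_bins)])
print(n_nonevent, n_event)   # [1. 1. 1.] [1. 2. 0.] -> last prebin is pure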
4,627 | optbinning.binning.multidimensional.continuous_binning_2d | _fit | null | def _fit(self, x, y, z, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, z_clean, x_missing, y_missing, z_missing,
x_special, y_special, z_special,
categories_x, categories_y] = split_data_2d(
self.dtype_x, self.dtype_y, x, y, z, self.special_codes_x,
self.special_codes_y, check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.dtype_x == "categorical":
logger.info("Pre-processing: number of categories in x: {}"
.format(len(categories_x)))
if self.dtype_y == "categorical":
logger.info("Pre-processing: number of categories in y: {}"
.format(len(categories_y)))
if self.verbose:
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
splits_x = self._fit_prebinning(self.dtype_x, x_clean, z_clean,
self.max_n_prebins_x,
self.min_prebin_size_x)
splits_y = self._fit_prebinning(self.dtype_y, y_clean, z_clean,
self.max_n_prebins_y,
self.min_prebin_size_y)
R, S, SS = self._prebinning_matrices(
splits_x, splits_y, x_clean, y_clean, z_clean, x_missing,
y_missing, z_missing, x_special, y_special, z_special)
if self.strategy == "cart":
if self.verbose:
logger.info("Prebinning: applying strategy cart...")
n_splits_x = len(splits_x)
n_splits_y = len(splits_y)
clf_nodes = n_splits_x * n_splits_y
indices_x = np.digitize(x_clean, splits_x, right=False)
n_bins_x = n_splits_x + 1
indices_y = np.digitize(y_clean, splits_y, right=False)
n_bins_y = n_splits_y + 1
xt = np.empty(len(x_clean), dtype=int)
yt = np.empty(len(y_clean), dtype=int)
for i in range(n_bins_x):
xt[(indices_x == i)] = i
for i in range(n_bins_y):
yt[(indices_y == i)] = i
xyt = np.c_[xt, yt]
min_prebin_size = min(self.min_prebin_size_x,
self.min_prebin_size_y) * 0.25
clf = DecisionTreeRegressor(min_samples_leaf=min_prebin_size,
max_leaf_nodes=clf_nodes)
clf.fit(xyt, z_clean)
self._clf = clf
self._categories_x = categories_x
self._categories_y = categories_y
self._time_prebinning = time.perf_counter() - time_prebinning
self._n_prebins = R.size
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
rows, n_records, sums, stds = self._fit_optimizer(
splits_x, splits_y, R, S, SS)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
# Refinements
m, n = R.shape
self._n_refinements = (m * n * (m + 1) * (n + 1)) // 4 - len(rows)
# solution matrices
D = np.empty(m * n, dtype=float)
P = np.empty(m * n, dtype=int)
selected_rows = np.array(rows, dtype=object)[self._solution]
self._selected_rows = selected_rows
self._m, self._n = m, n
n_selected_rows = selected_rows.shape[0] + 2
opt_sums = np.empty(n_selected_rows, dtype=float)
opt_n_records = np.empty(n_selected_rows, dtype=int)
opt_stds = np.zeros(n_selected_rows, dtype=float)
for i, r in enumerate(selected_rows):
_n_records = n_records[self._solution][i]
_sums = sums[self._solution][i]
_mean = _sums / _n_records
_stds = stds[self._solution][i]
P[r] = i
D[r] = _mean
opt_sums[i] = _sums
opt_n_records[i] = _n_records
opt_stds[i] = _stds
opt_n_records[-2] = self._n_records_special
opt_sums[-2] = self._sum_special
opt_stds[-2] = self._std_special
opt_n_records[-1] = self._n_records_missing
opt_sums[-1] = self._sum_missing
opt_stds[-1] = self._std_missing
self._sums = opt_sums
self._n_records = opt_n_records
D = D.reshape((m, n))
P = P.reshape((m, n))
# optimal bins
splits_x_optimal, splits_y_optimal = self._splits_xy_optimal(
selected_rows, splits_x, splits_y, P)
self._splits_x_optimal = splits_x_optimal
self._splits_y_optimal = splits_y_optimal
# instantiate binning table
self._binning_table = ContinuousBinningTable2D(
self.name_x, self.name_y, self.dtype_x, self.dtype_y,
splits_x_optimal, splits_y_optimal, m, n, opt_n_records,
opt_sums, opt_stds, D, P, self._categories_x, self._categories_y)
self.name = "-".join((self.name_x, self.name_y))
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, x, y, z, check_input) |
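An end-to-end usage sketch of the pipeline implemented by _fit (pre-processing, pre-binning, optimization, post-processing) on synthetic data. It assumes the top-level import shown earlier and that status and binning_table are exposed as properties, as in the 1D classes.

import numpy as np
from optbinning import ContinuousOptimalBinning2D

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, 5000)
y = rng.uniform(0, 10, 5000)
z = 2.0 * x - 1.5 * y + rng.normal(0, 1, 5000)   # continuous target

optb2d = ContinuousOptimalBinning2D(name_x="x", name_y="y")
optb2d.fit(x, y, z)
print(optb2d.status)                  # solver status, e.g. "OPTIMAL"
print(optb2d.binning_table.build())   # per-rectangle count / sum / std table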
4,628 | optbinning.binning.multidimensional.continuous_binning_2d | _fit_optimizer | null | def _fit_optimizer(self, splits_x, splits_y, R, S, SS):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
# Min/max number of bins (bin size)
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Number of threads
n_jobs = effective_n_jobs(self.n_jobs)
if self.verbose:
logger.info("Optimizer: {} jobs.".format(n_jobs))
if self.monotonic_trend_x is None:
logger.info(
"Optimizer: monotonic trend x not set.")
else:
logger.info("Optimizer: monotonic trend x set to {}."
.format(self.monotonic_trend_x))
if self.monotonic_trend_y is None:
logger.info(
"Optimizer: monotonic trend y not set.")
else:
logger.info("Optimizer: monotonic trend y set to {}."
.format(self.monotonic_trend_y))
if self.solver == "cp":
scale = int(1e6)
optimizer = Binning2DCP(
self.monotonic_trend_x, self.monotonic_trend_y,
self.min_n_bins, self.max_n_bins, self.min_mean_diff_x,
self.min_mean_diff_y, self.gamma, n_jobs, self.time_limit)
elif self.solver == "mip":
scale = None
optimizer = Binning2DMIP(
self.monotonic_trend_x, self.monotonic_trend_y,
self.min_n_bins, self.max_n_bins, self.min_mean_diff_x,
self.min_mean_diff_y, self.gamma, n_jobs, self.time_limit)
if self.verbose:
logger.info("Optimizer: model data...")
time_model_data = time.perf_counter()
if self.strategy == "cart":
[n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y,
mean, n_records, sums, stds] = continuous_model_data_cart(
self._clf, R, S, SS, self.monotonic_trend_x,
self.monotonic_trend_y, scale, min_bin_size, max_bin_size)
else:
[n_grid, n_rectangles, rows, cols, c, d_connected_x, d_connected_y,
mean, n_records, sums, stds] = continuous_model_data(
R, S, SS, self.monotonic_trend_x, self.monotonic_trend_y,
scale, min_bin_size, max_bin_size)
self._time_model_data = time.perf_counter() - time_model_data
if self.verbose:
logger.info("Optimizer: model data terminated. Time {:.4f}s"
.format(self._time_model_data))
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_grid, n_rectangles, cols, c, d_connected_x,
d_connected_y, mean, n_records)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
self._cols = cols
self._rows = rows
self._c = c
return rows, n_records, sums, stds
| (self, splits_x, splits_y, R, S, SS) |
4,629 | optbinning.binning.multidimensional.binning_2d | _fit_prebinning | null | def _fit_prebinning(self, dtype, x, z, max_n_prebins, min_prebin_size):
# Pre-binning algorithm
min_bin_size = int(np.ceil(min_prebin_size * self._n_samples))
prebinning = PreBinning(method=self.prebinning_method,
n_bins=max_n_prebins,
min_bin_size=min_bin_size,
problem_type=self._problem_type).fit(x, z)
return prebinning.splits
| (self, dtype, x, z, max_n_prebins, min_prebin_size) |
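A sketch of what the "cart" pre-binning amounts to, under the assumption that the PreBinning helper wraps a decision tree and reads candidate split points from its internal node thresholds.

import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, 1000)
z = np.sin(x) + rng.normal(0, 0.1, 1000)

tree = DecisionTreeRegressor(max_leaf_nodes=5, min_samples_leaf=50)
tree.fit(x.reshape(-1, 1), z)

mask = tree.tree_.feature >= 0             # internal (non-leaf) nodes
splits = np.sort(tree.tree_.threshold[mask])
print(splits)                              # candidate prebin split points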
4,634 | optbinning.binning.multidimensional.continuous_binning_2d | _prebinning_matrices | null | def _prebinning_matrices(self, splits_x, splits_y, x_clean, y_clean,
z_clean, x_missing, y_missing, z_missing,
x_special, y_special, z_special):
self._n_records_missing = len(z_missing)
self._n_records_special = len(z_special)
self._sum_missing = np.sum(z_missing)
self._sum_special = np.sum(z_special)
if len(z_missing):
self._std_missing = np.std(z_missing)
else:
self._std_missing = 0
if len(z_special):
self._std_special = np.std(z_special)
else:
self._std_special = 0
n_splits_x = len(splits_x)
n_splits_y = len(splits_y)
indices_x = np.digitize(x_clean, splits_x, right=False)
n_bins_x = n_splits_x + 1
indices_y = np.digitize(y_clean, splits_y, right=False)
n_bins_y = n_splits_y + 1
R = np.empty((n_bins_y, n_bins_x), dtype=float)
S = np.empty((n_bins_y, n_bins_x), dtype=float)
SS = np.empty((n_bins_y, n_bins_x), dtype=float)
for i in range(n_bins_y):
mask_y = (indices_y == i)
for j in range(n_bins_x):
mask_x = (indices_x == j)
mask = mask_x & mask_y
zmask = z_clean[mask]
R[i, j] = np.count_nonzero(mask)
S[i, j] = np.sum(zmask)
SS[i, j] = np.sum(zmask ** 2)
return R, S, SS
| (self, splits_x, splits_y, x_clean, y_clean, z_clean, x_missing, y_missing, z_missing, x_special, y_special, z_special) |
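R, S and SS are per-cell sufficient statistics (count, sum, sum of squares), so each cell's mean and population standard deviation follow without revisiting the raw samples. A self-contained sketch:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 4, 200)
y = rng.uniform(0, 4, 200)
z = x + y + rng.normal(0, 0.5, 200)
splits_x, splits_y = np.array([2.0]), np.array([2.0])

ix = np.digitize(x, splits_x)              # 0 or 1 (two bins per axis)
iy = np.digitize(y, splits_y)
R, S, SS = (np.zeros((2, 2)) for _ in range(3))
for i in range(2):
    for j in range(2):
        zm = z[(iy == i) & (ix == j)]
        R[i, j], S[i, j], SS[i, j] = len(zm), zm.sum(), (zm ** 2).sum()

mean = S / R
std = np.sqrt(SS / R - mean ** 2)          # matches np.std (ddof=0) per cell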
4,635 | optbinning.binning.binning | _prebinning_refinement | null | def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
x_special, y_special, y_others, sw_clean,
sw_missing, sw_special, sw_others):
y0 = (y == 0)
y1 = ~y0
# Compute n_nonevent and n_event for special, missing and others.
self._n_nonevent_special, self._n_event_special = target_info_special(
self.special_codes, x_special, y_special, sw_special)
self._n_nonevent_missing, self._n_event_missing = target_info_samples(
y_missing, sw_missing)
if len(y_others):
(self._n_nonevent_cat_others,
self._n_event_cat_others) = target_info_samples(
y_others, sw_others)
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
splits_prebinning, n_nonevent, n_event = self._compute_prebins(
splits_prebinning, x, y0, y1, sw_clean)
return splits_prebinning, n_nonevent, n_event
| (self, splits_prebinning, x, y, y_missing, x_special, y_special, y_others, sw_clean, sw_missing, sw_special, sw_others) |
4,638 | optbinning.binning.multidimensional.binning_2d | _splits_xy_optimal | null | def _splits_xy_optimal(self, selected_rows, splits_x, splits_y, P):
bins_x = np.concatenate([[-np.inf], splits_x, [np.inf]])
bins_y = np.concatenate([[-np.inf], splits_y, [np.inf]])
bins_str_x = np.array([[bins_x[i], bins_x[i+1]]
for i in range(len(bins_x) - 1)])
bins_str_y = np.array([[bins_y[i], bins_y[i+1]]
for i in range(len(bins_y) - 1)])
splits_x_optimal = []
splits_y_optimal = []
for i in range(len(selected_rows)):
pos_y, pos_x = np.where(P == i)
mask_x = np.arange(pos_x.min(), pos_x.max() + 1)
mask_y = np.arange(pos_y.min(), pos_y.max() + 1)
bin_x = bins_str_x[mask_x]
bin_y = bins_str_y[mask_y]
splits_x_optimal.append([bin_x[0][0], bin_x[-1][1]])
splits_y_optimal.append([bin_y[0][0], bin_y[-1][1]])
return splits_x_optimal, splits_y_optimal
| (self, selected_rows, splits_x, splits_y, P) |
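A self-contained sketch of the bound recovery above: P stores, for each prebin cell, the index of the optimal rectangle covering it, so each rectangle's x/y bounds come from the minimum and maximum column/row it occupies in the prebin grid.

import numpy as np

splits_x, splits_y = np.array([1.0, 2.0]), np.array([5.0])
bins_x = np.concatenate([[-np.inf], splits_x, [np.inf]])
bins_y = np.concatenate([[-np.inf], splits_y, [np.inf]])

P = np.array([[0, 0, 1],                   # 2x3 prebin grid, 2 rectangles
              [0, 0, 1]])
for r in range(P.max() + 1):
    pos_y, pos_x = np.where(P == r)
    print(r, [bins_x[pos_x.min()], bins_x[pos_x.max() + 1]],
          [bins_y[pos_y.min()], bins_y[pos_y.max() + 1]])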
4,641 | optbinning.binning.multidimensional.continuous_binning_2d | fit | Fit the optimal binning 2D according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : ContinuousOptimalBinning2D
Fitted optimal binning 2D.
| def fit(self, x, y, z, check_input=False):
"""Fit the optimal binning 2D according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : ContinuousOptimalBinning2D
Fitted optimal binning 2D.
"""
return self._fit(x, y, z, check_input)
| (self, x, y, z, check_input=False) |
4,642 | optbinning.binning.multidimensional.continuous_binning_2d | fit_transform | Fit the optimal binning 2D according to the given training data,
then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
metric : str (default="mean")
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
| def fit_transform(self, x, y, z, metric="mean", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Fit the optimal binning 2D according to the given training data,
then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
z : array-like, shape = (n_samples,)
Target vector relative to x and y.
metric : str (default="mean")
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, z, check_input).transform(
x, y, metric, metric_special, metric_missing, show_digits,
check_input)
| (self, x, y, z, metric='mean', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
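A hedged usage sketch of fit_transform, labelling each sample with its optimal rectangle; the exact label format returned with metric="bins" is illustrative, not guaranteed.

import numpy as np
from optbinning import ContinuousOptimalBinning2D

rng = np.random.default_rng(1)
x, y = rng.uniform(0, 10, 2000), rng.uniform(0, 10, 2000)
z = x - y + rng.normal(0, 1, 2000)

optb2d = ContinuousOptimalBinning2D(name_x="x", name_y="y")
z_bins = optb2d.fit_transform(x, y, z, metric="bins", show_digits=3)
print(z_bins[:3])                          # string labels for each rectangle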
4,646 | optbinning.binning.binning | read_json |
Read json file containing split points and set them as the new split
points.
Parameters
----------
path: The path of the json file.
| def read_json(self, path):
"""
Read json file containing split points and set them as the new split
points.
Parameters
----------
path: The path of the json file.
"""
self._is_fitted = True
with open(path, "r") as read_file:
bin_table_attr = json.load(read_file)
for key in bin_table_attr.keys():
if isinstance(bin_table_attr[key], list):
bin_table_attr[key] = np.array(bin_table_attr[key])
self._binning_table = BinningTable(**bin_table_attr)
| (self, path) |
4,647 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``fit``.
x : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``x`` parameter in ``fit``.
z : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``z`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.multidimensional.continuous_binning_2d.ContinuousOptimalBinning2D, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', x: Union[bool, NoneType, str] = '$UNCHANGED$', z: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.multidimensional.continuous_binning_2d.ContinuousOptimalBinning2D |
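A short sketch of the generated set_fit_request method; as the code above shows, it raises unless scikit-learn's metadata routing is enabled globally, so the sklearn.set_config call is required.

import sklearn
from optbinning import ContinuousOptimalBinning2D

sklearn.set_config(enable_metadata_routing=True)
optb2d = ContinuousOptimalBinning2D().set_fit_request(check_input=True)
# Inside a meta-estimator, `check_input` passed to fit() is now routed here.
sklearn.set_config(enable_metadata_routing=False)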
4,650 | optbinning.binning.binning | to_json |
Save optimal bins and/or splits points and transformation depending on
the target type.
Parameters
----------
path: The path where the json is going to be saved.
| def to_json(self, path):
"""
Save optimal bins and/or splits points and transformation depending on
the target type.
Parameters
----------
path: The path where the json is going to be saved.
"""
if path is None:
raise ValueError('Specify the path for the json file')
table = self.binning_table
opt_bin_dict = dict()
opt_bin_dict['name'] = table.name
opt_bin_dict['dtype'] = table.dtype
opt_bin_dict['special_codes'] = table.special_codes
if table.dtype == 'numerical':
opt_bin_dict['splits'] = table.splits.tolist()
elif table.dtype == 'categorical':
opt_bin_dict['splits'] = [split.tolist() for split in table.splits]
opt_bin_dict['n_nonevent'] = table.n_nonevent.tolist()
opt_bin_dict['n_event'] = table.n_event.tolist()
opt_bin_dict['min_x'] = table.min_x
opt_bin_dict['max_x'] = table.max_x
opt_bin_dict['categories'] = table.categories
opt_bin_dict['cat_others'] = table.cat_others
opt_bin_dict['user_splits'] = table.user_splits
with open(path, "w") as write_file:
json.dump(opt_bin_dict, write_file)
| (self, path) |
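A minimal round-trip sketch for to_json/read_json; note these methods belong to the 1D OptimalBinning class (package optbinning.binning.binning), not to the 2D variants.

import numpy as np
from optbinning import OptimalBinning

rng = np.random.default_rng(0)
x = rng.normal(0, 1, 1000)
y = (x + rng.normal(0, 1, 1000) > 0).astype(int)

optb = OptimalBinning(name="x").fit(x, y)
optb.to_json("optb.json")                  # serialize splits and counts

optb_new = OptimalBinning(name="x")
optb_new.read_json("optb.json")            # rebuilds the binning table
print(optb_new.binning_table.build())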
4,651 | optbinning.binning.multidimensional.continuous_binning_2d | transform | Transform given data to mean using bins from the fitted optimal
binning 2D.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
metric : str (default="mean")
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
| def transform(self, x, y, metric="mean", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to mean using bins from the fitted optimal
binning 2D.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector x, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Training vector y, where n_samples is the number of samples.
metric : str (default="mean")
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
z_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_continuous_target(
self.dtype_x, self.dtype_y, self._splits_x_optimal,
self._splits_y_optimal, x, y, self._n_records, self._sums,
self.special_codes_x, self.special_codes_y, self._categories_x,
self._categories_y, metric, metric_special, metric_missing,
show_digits, check_input)
| (self, x, y, metric='mean', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
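A hedged sketch of transform on new data, showing the special/missing handling documented above: NaN inputs fall into the missing bin and receive the empirical mean when metric_missing="empirical".

import numpy as np
from optbinning import ContinuousOptimalBinning2D

rng = np.random.default_rng(2)
x, y = rng.uniform(0, 10, 3000), rng.uniform(0, 10, 3000)
z = 0.5 * x + y + rng.normal(0, 1, 3000)
x[:50] = np.nan                            # some missing values in training
optb2d = ContinuousOptimalBinning2D(name_x="x", name_y="y").fit(x, y, z)

x_new = np.array([0.5, np.nan, 7.2])
y_new = np.array([3.0, 1.0, np.nan])
z_new = optb2d.transform(x_new, y_new, metric="mean",
                         metric_special=0, metric_missing="empirical")
print(z_new)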
4,652 | optbinning.binning.piecewise.continuous_binning | ContinuousOptimalPWBinning | Optimal Piecewise binning of a numerical variable with respect to a
continuous target.
Parameters
----------
name : str, optional (default="")
The variable name.
objective : str, optional (default="l2")
The objective function. Supported objectives are "l2", "l1", "huber"
and "quantile". Note that "l1", "huber" and "quantile" are robust
objective functions.
degree : int (default=1)
The degree of the polynomials.
* degree = 0: piecewise constant functions.
* degree = 1: piecewise linear functions.
* degree > 1: piecewise polynomial functions.
continuous : bool (default=True)
Whether to fit a continuous or discontinuous piecewise regression.
continuous_deriv : bool (default=True)
Whether to fit a polynomial with continuous derivatives. This option
fits a smooth degree d-polynomial with d-1 continuity in derivatives
(splines).
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecistionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The monotonic trend. Supported trends are "auto", "auto_heuristic" and
"auto_asc_desc" to automatically determine the trend maximizing IV
using a machine learning classifier, "ascending", "descending",
"concave", "convex", "peak" and "peak_heuristic" to allow a peak change
point, and "valley" and "valley_heuristic" to allow a valley change
point. Trends "auto_heuristic", "peak_heuristic" and "valley_heuristic"
use a heuristic to determine the change point, and are significantly
faster for large size instances (``max_n_prebins > 20``). Trend
"auto_asc_desc" is used to automatically select the best monotonic
trend between "ascending" and "descending". If None, then the
monotonic constraint is disabled.
n_subsamples : int or None (default=None)
Number of subsamples to fit the piecewise regression algorithm. If
None, all values are considered.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint. Option supported by solvers
"cp" and "mip".
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method, "zscore" to use the modified
Z-score method or "yquantile" to use the y-axis detector over
quantiles.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
solver : str, optional (default="auto")
The optimizer to solve the underlying mathematical optimization
problem. Supported solvers are `"ecos"
<https://github.com/embotech/ecos>`_, `"osqp"
<https://github.com/oxfordcontrol/osqp>`_, "direct", to choose the
direct solver, and "auto", to choose the most appropriate solver for
the problem. Version 0.16.1 added support to solvers
`"scs" <https://github.com/cvxgrp/scs>`_ and `"highs"
<https://github.com/ERGO-Code/HiGHS>`_.
h_epsilon : float (default=1.35)
The parameter h_epsilon used when ``objective="huber"``, controls the
number of samples that should be classified as outliers.
quantile : float (default=0.5)
The parameter quantile is the q-th quantile to be used when
``objective="quantile"``.
regularization : str or None (default=None)
Type of regularization. Supported regularization are "l1" (Lasso) and
"l2" (Ridge). If None, no regularization is applied.
reg_l1 : float (default=1.0)
L1 regularization term. Increasing this value will smooth the
regression model. Only applicable if ``regularization="l1"``.
reg_l2 : float (default=1.0)
L2 regularization term. Increasing this value will smooth the
regression model. Only applicable if ``regularization="l2"``.
random_state : int, RandomState instance or None (default=None)
If ``n_subsamples < n_samples``, controls the shuffling applied to the
data before applying the split.
verbose : bool (default=False)
Enable verbose output.
| class ContinuousOptimalPWBinning(BasePWBinning):
"""Optimal Piecewise binning of a numerical variable with respect to a
continuous target.
Parameters
----------
name : str, optional (default="")
The variable name.
objective : str, optional (default="l2")
The objective function. Supported objectives are "l2", "l1", "huber"
and "quantile". Note that "l1", "huber" and "quantile" are robust
objective functions.
degree : int (default=1)
The degree of the polynomials.
* degree = 0: piecewise constant functions.
* degree = 1: piecewise linear functions.
* degree > 1: piecewise polynomial functions.
continuous : bool (default=True)
Whether to fit a continuous or discontinuous piecewise regression.
continuous_deriv : bool (default=True)
Whether to fit a polynomial with continuous derivatives. This option
fits a smooth degree d-polynomial with d-1 continuity in derivatives
(splines).
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecistionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The monotonic trend. Supported trends are "auto", "auto_heuristic" and
"auto_asc_desc" to automatically determine the trend maximizing IV
using a machine learning classifier, "ascending", "descending",
"concave", "convex", "peak" and "peak_heuristic" to allow a peak change
point, and "valley" and "valley_heuristic" to allow a valley change
point. Trends "auto_heuristic", "peak_heuristic" and "valley_heuristic"
use a heuristic to determine the change point, and are significantly
faster for large size instances (``max_n_prebins > 20``). Trend
"auto_asc_desc" is used to automatically select the best monotonic
trend between "ascending" and "descending". If None, then the
monotonic constraint is disabled.
n_subsamples : int or None (default=None)
Number of subsamples to fit the piecewise regression algorithm. If
None, all values are considered.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint. Option supported by solvers
"cp" and "mip".
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method, "zscore" to use the modified
Z-score method or "yquantile" to use the y-axis detector over
quantiles.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
solver : str, optional (default="auto")
The optimizer to solve the underlying mathematical optimization
problem. Supported solvers are `"ecos"
<https://github.com/embotech/ecos>`_, `"osqp"
<https://github.com/oxfordcontrol/osqp>`_, "direct", to choose the
direct solver, and "auto", to choose the most appropriate solver for
the problem. Version 0.16.1 added support to solvers
`"scs" <https://github.com/cvxgrp/scs>`_ and `"highs"
<https://github.com/ERGO-Code/HiGHS>`_.
h_epsilon : float (default=1.35)
The parameter h_epsilon used when ``objective="huber"``, controls the
number of samples that should be classified as outliers.
quantile : float (default=0.5)
The parameter quantile is the q-th quantile to be used when
``objective="quantile"``.
regularization : str or None (default=None)
Type of regularization. Supported regularization are "l1" (Lasso) and
"l2" (Ridge). If None, no regularization is applied.
reg_l1 : float (default=1.0)
L1 regularization term. Increasing this value will smooth the
regression model. Only applicable if ``regularization="l1"``.
reg_l2 : float (default=1.0)
L2 regularization term. Increasing this value will smooth the
regression model. Only applicable if ``regularization="l2"``.
random_state : int, RandomState instance or None (default=None)
If ``n_subsamples < n_samples``, controls the shuffling applied to the
data before applying the split.
verbose : bool (default=False)
Enable verbose output.
"""
def __init__(self, name="", objective="l2", degree=1, continuous=True,
continuous_deriv=True, prebinning_method="cart",
max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None,
max_n_bins=None, min_bin_size=None, max_bin_size=None,
monotonic_trend="auto", n_subsamples=None, max_pvalue=None,
max_pvalue_policy="consecutive", outlier_detector=None,
outlier_params=None, user_splits=None, user_splits_fixed=None,
special_codes=None, split_digits=None, solver="auto",
h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0,
reg_l2=1.0, random_state=None, verbose=False):
super().__init__(name, None, objective, degree, continuous,
continuous_deriv, prebinning_method, max_n_prebins,
min_prebin_size, min_n_bins, max_n_bins, min_bin_size,
max_bin_size, monotonic_trend, n_subsamples,
max_pvalue, max_pvalue_policy, outlier_detector,
outlier_params, user_splits, user_splits_fixed,
special_codes, split_digits, solver, h_epsilon,
quantile, regularization, reg_l1, reg_l2,
random_state, verbose)
self._problem_type = "regression"
self._n_records_missing = None
self._n_records_special = None
self._sum_special = None
self._sum_missing = None
self._std_special = None
self._std_missing = None
self._min_target_missing = None
self._min_target_special = None
self._max_target_missing = None
self._max_target_special = None
self._n_zeros_missing = None
self._n_zeros_special = None
def fit_transform(self, x, y, metric_special=0, metric_missing=0,
lb=None, ub=None, check_input=False):
"""Fit the optimal piecewise binning according to the given training
data, then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, check_input).transform(
x, metric_special, metric_missing, lb, ub, check_input)
def transform(self, x, metric_special=0, metric_missing=0,
lb=None, ub=None, check_input=False):
"""Transform given data using bins from the fitted optimal piecewise
binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_continuous_target(
self._optb.splits, x, self._c, lb, ub, self._n_records_special,
self._sum_special, self._n_records_missing, self._sum_missing,
self.special_codes, metric_special, metric_missing, check_input)
def _fit(self, x, y, lb, ub, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal piecewise binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params(deep=False), estimator=None,
problem_type=self._problem_type)
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
_, _, _, _, _, sw_special, _] = self._fit_preprocessing(
x, y, check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples - (n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
self._time_estimator = 0
# Fit optimal binning algorithm for continuous target. Use optimal
# split points to compute optimal piecewise functions
self._fit_binning(x_clean, y_clean, y_clean, lb, ub)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
[self._n_records_special, self._sum_special, self._n_zeros_special,
self._std_special, self._min_target_special,
self._max_target_special] = target_info_special_continuous(
self.special_codes, x_special, y_special, sw_special)
self._n_records_missing = len(y_missing)
self._sum_missing = np.sum(y_missing)
self._n_zeros_missing = np.count_nonzero(y_missing == 0)
if len(y_missing):
self._std_missing = np.std(y_missing)
self._min_target_missing = np.min(y_missing)
self._max_target_missing = np.max(y_missing)
bt = self._optb.binning_table.build(add_totals=False)
n_records = bt["Count"].values[:-2]
sums = bt["Sum"].values[:-2]
stds = bt["Std"].values[:-2]
min_target = bt["Min"].values[:-2]
max_target = bt["Max"].values[:-2]
n_zeros = bt["Zeros count"].values[:-2]
n_records = np.r_[n_records, self._n_records_special]
sums = np.r_[sums, self._sum_special]
stds = np.r_[stds, self._std_special]
min_target = np.r_[min_target, self._min_target_special]
max_target = np.r_[max_target, self._max_target_special]
n_zeros = np.r_[n_zeros, self._n_zeros_special]
n_records = np.r_[n_records, self._n_records_missing]
sums = np.r_[sums, self._sum_missing]
stds = np.r_[stds, self._std_missing]
min_target = np.r_[min_target, self._min_target_missing]
max_target = np.r_[max_target, self._max_target_missing]
n_zeros = np.r_[n_zeros, self._n_zeros_missing]
# Compute metrics
if self.verbose:
logger.info("Post-processing: compute performance metrics.")
d_metrics = continuous_metrics(
x_clean, y_clean, self._optb.splits, self._c, lb, ub,
self._n_records_special, self._sum_special,
self._n_records_missing, self._sum_missing, self.special_codes)
# Binning table
self._binning_table = PWContinuousBinningTable(
self.name, self.special_codes, self._optb.splits, self._c,
n_records, sums, stds, min_target, max_target, n_zeros, lb, ub,
x_clean.min(), x_clean.max(), d_metrics)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal piecewise binning terminated. Status: {}. "
"Time: {:.4f}s".format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (name='', objective='l2', degree=1, continuous=True, continuous_deriv=True, prebinning_method='cart', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend='auto', n_subsamples=None, max_pvalue=None, max_pvalue_policy='consecutive', outlier_detector=None, outlier_params=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, solver='auto', h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0, reg_l2=1.0, random_state=None, verbose=False) |
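A minimal usage sketch for the class above, assuming it is importable from the optbinning top level; transform evaluates the fitted piecewise polynomial for each sample.

import numpy as np
from optbinning import ContinuousOptimalPWBinning

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, 5000)
y = 3.0 * x + rng.normal(0, 1, 5000)

optb = ContinuousOptimalPWBinning(name="x", degree=1, objective="l2")
optb.fit(x, y)
y_hat = optb.transform(x)                  # piecewise-linear fitted values
print(optb.binning_table.build())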
4,654 | optbinning.binning.piecewise.continuous_binning | __init__ | null | def __init__(self, name="", objective="l2", degree=1, continuous=True,
continuous_deriv=True, prebinning_method="cart",
max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None,
max_n_bins=None, min_bin_size=None, max_bin_size=None,
monotonic_trend="auto", n_subsamples=None, max_pvalue=None,
max_pvalue_policy="consecutive", outlier_detector=None,
outlier_params=None, user_splits=None, user_splits_fixed=None,
special_codes=None, split_digits=None, solver="auto",
h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0,
reg_l2=1.0, random_state=None, verbose=False):
super().__init__(name, None, objective, degree, continuous,
continuous_deriv, prebinning_method, max_n_prebins,
min_prebin_size, min_n_bins, max_n_bins, min_bin_size,
max_bin_size, monotonic_trend, n_subsamples,
max_pvalue, max_pvalue_policy, outlier_detector,
outlier_params, user_splits, user_splits_fixed,
special_codes, split_digits, solver, h_epsilon,
quantile, regularization, reg_l1, reg_l2,
random_state, verbose)
self._problem_type = "regression"
self._n_records_missing = None
self._n_records_special = None
self._sum_special = None
self._sum_missing = None
self._std_special = None
self._std_missing = None
self._min_target_missing = None
self._min_target_special = None
self._max_target_missing = None
self._max_target_special = None
self._n_zeros_missing = None
self._n_zeros_special = None
| (self, name='', objective='l2', degree=1, continuous=True, continuous_deriv=True, prebinning_method='cart', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend='auto', n_subsamples=None, max_pvalue=None, max_pvalue_policy='consecutive', outlier_detector=None, outlier_params=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, solver='auto', h_epsilon=1.35, quantile=0.5, regularization=None, reg_l1=1.0, reg_l2=1.0, random_state=None, verbose=False) |
4,661 | optbinning.binning.piecewise.continuous_binning | _fit | null | def _fit(self, x, y, lb, ub, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal piecewise binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params(deep=False), estimator=None,
problem_type=self._problem_type)
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
_, _, _, _, _, sw_special, _] = self._fit_preprocessing(
x, y, check_input)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples - (n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
self._time_estimator = 0
# Fit optimal binning algorithm for continuous target. Use optimal
# split points to compute optimal piecewise functions
self._fit_binning(x_clean, y_clean, y_clean, lb, ub)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
[self._n_records_special, self._sum_special, self._n_zeros_special,
self._std_special, self._min_target_special,
self._max_target_special] = target_info_special_continuous(
self.special_codes, x_special, y_special, sw_special)
self._n_records_missing = len(y_missing)
self._sum_missing = np.sum(y_missing)
self._n_zeros_missing = np.count_nonzero(y_missing == 0)
if len(y_missing):
self._std_missing = np.std(y_missing)
self._min_target_missing = np.min(y_missing)
self._max_target_missing = np.max(y_missing)
bt = self._optb.binning_table.build(add_totals=False)
n_records = bt["Count"].values[:-2]
sums = bt["Sum"].values[:-2]
stds = bt["Std"].values[:-2]
min_target = bt["Min"].values[:-2]
max_target = bt["Max"].values[:-2]
n_zeros = bt["Zeros count"].values[:-2]
n_records = np.r_[n_records, self._n_records_special]
sums = np.r_[sums, self._sum_special]
stds = np.r_[stds, self._std_special]
min_target = np.r_[min_target, self._min_target_special]
max_target = np.r_[max_target, self._max_target_special]
n_zeros = np.r_[n_zeros, self._n_zeros_special]
n_records = np.r_[n_records, self._n_records_missing]
sums = np.r_[sums, self._sum_missing]
stds = np.r_[stds, self._std_missing]
min_target = np.r_[min_target, self._min_target_missing]
max_target = np.r_[max_target, self._max_target_missing]
n_zeros = np.r_[n_zeros, self._n_zeros_missing]
# Compute metrics
if self.verbose:
logger.info("Post-processing: compute performance metrics.")
d_metrics = continuous_metrics(
x_clean, y_clean, self._optb.splits, self._c, lb, ub,
self._n_records_special, self._sum_special,
self._n_records_missing, self._sum_missing, self.special_codes)
# Binning table
self._binning_table = PWContinuousBinningTable(
self.name, self.special_codes, self._optb.splits, self._c,
n_records, sums, stds, min_target, max_target, n_zeros, lb, ub,
x_clean.min(), x_clean.max(), d_metrics)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal piecewise binning terminated. Status: {}. "
"Time: {:.4f}s".format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, x, y, lb, ub, check_input) |
4,662 | optbinning.binning.piecewise.base | _fit_binning | null | def _fit_binning(self, x, y, prediction, lb, ub):
if self.verbose:
logger.info("Pre-binning: optimal binning started.")
time_prebinning = time.perf_counter()
# Determine optimal split points
monotonic_trend = self.monotonic_trend
if self.monotonic_trend in ("concave", "convex"):
monotonic_trend = "auto"
if self._problem_type == "regression":
self._optb = ContinuousOptimalBinning(
name=self.name, dtype="numerical",
prebinning_method=self.prebinning_method,
max_n_prebins=self.max_n_prebins,
min_prebin_size=self.min_prebin_size,
min_n_bins=self.min_n_bins,
max_n_bins=self.max_n_bins,
min_bin_size=self.min_bin_size,
max_bin_size=self.max_bin_size,
monotonic_trend=monotonic_trend,
max_pvalue=self.max_pvalue,
max_pvalue_policy=self.max_pvalue_policy,
outlier_detector=self.outlier_detector,
outlier_params=self.outlier_params,
user_splits=self.user_splits,
user_splits_fixed=self.user_splits_fixed,
split_digits=self.split_digits)
elif self._problem_type == "classification":
self._optb = OptimalBinning(
name=self.name, dtype="numerical",
prebinning_method=self.prebinning_method,
max_n_prebins=self.max_n_prebins,
min_prebin_size=self.min_prebin_size,
min_n_bins=self.min_n_bins,
max_n_bins=self.max_n_bins,
min_bin_size=self.min_bin_size,
max_bin_size=self.max_bin_size,
monotonic_trend=monotonic_trend,
max_pvalue=self.max_pvalue,
max_pvalue_policy=self.max_pvalue_policy,
outlier_detector=self.outlier_detector,
outlier_params=self.outlier_params,
user_splits=self.user_splits,
user_splits_fixed=self.user_splits_fixed,
split_digits=self.split_digits)
self._optb.fit(x, y)
splits = self._optb.splits
n_splits = len(splits)
if self.verbose:
logger.info("Pre-binning: number of splits: {}."
.format(n_splits))
# Prepare optimization model data
n_bins = n_splits + 1
self._n_bins = n_bins
if self.n_subsamples is None or self.n_subsamples > len(x):
x_subsamples = x
pred_subsamples = prediction
if self.verbose:
logger.info("Pre-binning: no need for subsamples.")
else:
indices = np.digitize(x, splits, right=False)
[_, x_subsamples, _, pred_subsamples,
_, _, _, _] = train_test_split(
x, prediction, y, indices, test_size=self.n_subsamples,
random_state=self.random_state)
if self.verbose:
logger.info("Pre-binning: number of subsamples: {}."
.format(self.n_subsamples))
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: optimal binning terminated. Time {:.4}s."
.format(self._time_prebinning))
# LP problem
if self.verbose:
logger.info("Optimizer started.")
if self.monotonic_trend == "auto":
indices = np.digitize(x, splits, right=False)
mean = np.array([y[indices == i].mean() for i in range(n_bins)])
monotonic = type_of_monotonic_trend(mean)
if monotonic in ("undefined", "no monotonic"):
monotonic = None
elif "peak" in monotonic:
monotonic = "peak"
elif "valley" in monotonic:
monotonic = "valley"
if self.verbose:
logger.info("Optimizer: {} monotonic trend."
.format(monotonic))
else:
monotonic = self.monotonic_trend
time_solver = time.perf_counter()
optimizer = RobustPWRegression(
objective=self.objective,
degree=self.degree,
continuous=self.continuous,
continuous_deriv=self.continuous_deriv,
monotonic_trend=monotonic,
solver=self.solver,
h_epsilon=self.h_epsilon,
quantile=self.quantile,
regularization=self.regularization,
reg_l1=self.reg_l1,
reg_l2=self.reg_l2,
extrapolation="continue",
verbose=self.verbose)
optimizer.fit(x_subsamples, pred_subsamples, splits, lb=lb, ub=ub)
self._c = optimizer.coef_
self._optimizer = optimizer
self._status = retrieve_status(optimizer.status)
self._splits_optimal = splits
self._time_solver = time.perf_counter() - time_solver
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
| (self, x, y, prediction, lb, ub) |
4,663 | optbinning.binning.piecewise.base | _fit_preprocessing | null | def _fit_preprocessing(self, x, y, check_input):
return split_data(dtype="numerical", x=x, y=y,
special_codes=self.special_codes,
user_splits=self.user_splits,
check_input=check_input,
outlier_detector=self.outlier_detector,
outlier_params=self.outlier_params)
| (self, x, y, check_input) |
4,672 | optbinning.binning.piecewise.base | fit | Fit the optimal piecewise binning according to the given training
data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BasePWBinning
Fitted optimal piecewise binning.
| def fit(self, x, y, lb=None, ub=None, check_input=False):
"""Fit the optimal piecewise binning according to the given training
data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : BasePWBinning
Fitted optimal piecewise binning.
"""
return self._fit(x, y, lb, ub, check_input)
| (self, x, y, lb=None, ub=None, check_input=False) |
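Following the docstring above, a hedged sketch of fitting with prediction bounds (synthetic data; lb/ub keep the fitted piecewise prediction within the given range):

import numpy as np
from optbinning import ContinuousOptimalPWBinning

rng = np.random.default_rng(42)
x = rng.uniform(0, 10, size=500)
y = 3.0 * x + rng.normal(size=500)

optb = ContinuousOptimalPWBinning(name="x")
# lb/ub bound the fitted prediction; check_input validates x and y.
optb.fit(x, y, lb=0.0, ub=35.0, check_input=True)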
4,673 | optbinning.binning.piecewise.continuous_binning | fit_transform | Fit the optimal piecewise binning according to the given training
data, then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def fit_transform(self, x, y, metric_special=0, metric_missing=0,
lb=None, ub=None, check_input=False):
"""Fit the optimal piecewise binning according to the given training
data, then transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, check_input).transform(
x, metric_special, metric_missing, lb, ub, check_input)
| (self, x, y, metric_special=0, metric_missing=0, lb=None, ub=None, check_input=False) |
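A sketch of fit_transform with missing values mapped to the empirical mean (synthetic data; treating NaN as missing follows the library's preprocessing and is an assumption here):

import numpy as np
from optbinning import ContinuousOptimalPWBinning

rng = np.random.default_rng(7)
x = rng.uniform(0, 10, size=500)
y = x ** 2 + rng.normal(size=500)
x[:5] = np.nan  # missing values, transformed via metric_missing

optb = ContinuousOptimalPWBinning(name="x")
x_new = optb.fit_transform(x, y, metric_missing="empirical")
print(x_new[:5])  # mapped to an empirical mean per the docstring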
4,676 | optbinning.binning.piecewise.base | information | Print overview information about the option settings, problem
statistics, and the solution of the computation.
Parameters
----------
print_level : int (default=1)
Level of details.
| def information(self, print_level=1):
"""Print overview information about the options settings, problem
statistics, and the solution of the computation.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_fitted()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
if self._optimizer is not None:
solver = self._optimizer
time_solver = self._time_solver
else:
solver = None
time_solver = 0
dict_user_options = self.get_params()
if self._problem_type == "regression":
dict_user_options["estimator"] = None
print_binning_information(print_level, self.name, self._status,
self.solver, solver, self._time_total,
self._time_preprocessing,
self._time_estimator, self._time_prebinning,
time_solver, self._time_postprocessing,
self._n_bins, dict_user_options)
| (self, print_level=1) |
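A short sketch of the verbosity levels (what each level prints is an assumption beyond "level of details"; higher levels include the option settings):

import numpy as np
from optbinning import ContinuousOptimalPWBinning

x = np.linspace(0, 1, 200)
y = x + 0.1 * np.sin(20 * x)

optb = ContinuousOptimalPWBinning(name="x").fit(x, y)
optb.information(print_level=0)  # brief status summary (assumed)
optb.information(print_level=2)  # full option settings and statistics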
4,677 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``fit``.
lb : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``lb`` parameter in ``fit``.
ub : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``ub`` parameter in ``fit``.
x : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``x`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.piecewise.continuous_binning.ContinuousOptimalPWBinning, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', lb: Union[bool, NoneType, str] = '$UNCHANGED$', ub: Union[bool, NoneType, str] = '$UNCHANGED$', x: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.piecewise.continuous_binning.ContinuousOptimalPWBinning |
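For context, a sketch of how this generated method is used with metadata routing (assumes scikit-learn >= 1.3; the pipeline fit call is left commented because this estimator expects a 1-D x):

import sklearn
from sklearn.pipeline import Pipeline
from optbinning import ContinuousOptimalPWBinning

sklearn.set_config(enable_metadata_routing=True)

est = ContinuousOptimalPWBinning(name="x").set_fit_request(
    lb=True, ub=True, check_input=False)
pipe = Pipeline([("binning", est)])
# pipe.fit(x, y, lb=0.0, ub=1.0)  # lb/ub would be routed to est.fit()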
4,679 | sklearn.utils._metadata_requests | set_transform_request | Request metadata passed to the ``transform`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``transform`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``transform``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``transform``.
lb : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``lb`` parameter in ``transform``.
metric_missing : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_missing`` parameter in ``transform``.
metric_special : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``metric_special`` parameter in ``transform``.
ub : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``ub`` parameter in ``transform``.
x : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``x`` parameter in ``transform``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.piecewise.continuous_binning.ContinuousOptimalPWBinning, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', lb: Union[bool, NoneType, str] = '$UNCHANGED$', metric_missing: Union[bool, NoneType, str] = '$UNCHANGED$', metric_special: Union[bool, NoneType, str] = '$UNCHANGED$', ub: Union[bool, NoneType, str] = '$UNCHANGED$', x: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.piecewise.continuous_binning.ContinuousOptimalPWBinning |
4,680 | optbinning.binning.piecewise.continuous_binning | transform | Transform given data using bins from the fitted optimal piecewise
binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def transform(self, x, metric_special=0, metric_missing=0,
lb=None, ub=None, check_input=False):
"""Transform given data using bins from the fitted optimal piecewise
binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean and any
numerical value.
lb : float or None (default=None)
Avoid values below the lower bound lb.
ub : float or None (default=None)
Avoid values above the upper bound ub.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_continuous_target(
self._optb.splits, x, self._c, lb, ub, self._n_records_special,
self._sum_special, self._n_records_missing, self._sum_missing,
self.special_codes, metric_special, metric_missing, check_input)
| (self, x, metric_special=0, metric_missing=0, lb=None, ub=None, check_input=False) |
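A minimal transform sketch (synthetic data; per the docstring, lb/ub keep the transformed values within the given range):

import numpy as np
from optbinning import ContinuousOptimalPWBinning

rng = np.random.default_rng(1)
x = rng.uniform(0, 10, size=500)
y = np.log1p(x) + rng.normal(scale=0.1, size=500)

optb = ContinuousOptimalPWBinning(name="x").fit(x, y)
x_new = optb.transform(x, metric_missing="empirical", lb=0.0, ub=3.0)
print(x_new.min(), x_new.max())  # stays within [0.0, 3.0]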
4,681 | optbinning.binning.mdlp | MDLP |
Minimum Description Length Principle (MDLP) discretization algorithm.
Parameters
----------
min_samples_split : int (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int (default=2)
The minimum number of samples required to be at a leaf node.
max_candidates : int (default=32)
The maximum number of split points to evaluate at each partition.
Notes
-----
Implementation of the discretization algorithm in [FI93]. A dynamic
split strategy based on binning the number of candidate splits [CMR2001]
is implemented to increase efficiency. For large size datasets, it is
recommended to use a smaller ``max_candidates`` (e.g. 16) to get a
significant speed up.
References
----------
.. [FI93] U. M. Fayyad and K. B. Irani. "Multi-Interval Discretization of
Continuous-Valued Attributes for Classification Learning".
International Joint Conferences on Artificial Intelligence,
13:1022–1027, 1993.
.. [CMR2001] D. M. Chickering, C. Meek and R. Rounthwaite. "Efficient
Determination of Dynamic Split Points in a Decision Tree". In
Proceedings of the 2001 IEEE International Conference on Data
Mining, 91-98, 2001.
| class MDLP(BaseEstimator):
"""
Minimum Description Length Principle (MDLP) discretization algorithm.
Parameters
----------
min_samples_split : int (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int (default=2)
The minimum number of samples required to be at a leaf node.
max_candidates : int (default=32)
The maximum number of split points to evaluate at each partition.
Notes
-----
Implementation of the discretization algorithm in [FI93]. A dynamic
split strategy based on binning the number of candidate splits [CMR2001]
is implemented to increase efficiency. For large size datasets, it is
recommended to use a smaller ``max_candidates`` (e.g. 16) to get a
significant speed up.
References
----------
.. [FI93] U. M. Fayyad and K. B. Irani. "Multi-Interval Discretization of
Continuous-Valued Attributes for Classification Learning".
International Joint Conferences on Artificial Intelligence,
13:1022–1027, 1993.
.. [CMR2001] D. M. Chickering, C. Meek and R. Rounthwaite. "Efficient
Determination of Dynamic Split Points in a Decision Tree". In
Proceedings of the 2001 IEEE International Conference on Data
Mining, 91-98, 2001.
"""
def __init__(self, min_samples_split=2, min_samples_leaf=2,
max_candidates=32):
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_candidates = max_candidates
# auxiliary
self._splits = []
self._is_fitted = None
def fit(self, x, y):
"""Fit MDLP discretization algorithm.
Parameters
----------
x : array-like, shape = (n_samples)
Data samples, where n_samples is the number of samples.
y : array-like, shape = (n_samples)
Target vector relative to x.
Returns
-------
self : MDLP
"""
return self._fit(x, y)
def _fit(self, x, y):
_check_parameters(**self.get_params())
x = check_array(x, ensure_2d=False, force_all_finite=True)
y = check_array(y, ensure_2d=False, force_all_finite=True)
idx = np.argsort(x)
x = x[idx]
y = y[idx]
self._recurse(x, y, 0)
self._is_fitted = True
return self
def _recurse(self, x, y, id):
u_x = np.unique(x)
n_x = len(u_x)
n_y = len(np.bincount(y))
split = self._find_split(u_x, x, y)
if split is not None:
self._splits.append(split)
t = np.searchsorted(x, split, side="right")
if not self._terminate(n_x, n_y, y, y[:t], y[t:]):
self._recurse(x[:t], y[:t], id + 1)
self._recurse(x[t:], y[t:], id + 2)
def _find_split(self, u_x, x, y):
n_x = len(x)
u_x = np.unique(0.5 * (x[1:] + x[:-1])[(y[1:] - y[:-1]) != 0])
if len(u_x) > self.max_candidates:
percentiles = np.linspace(1, 100, self.max_candidates)
splits = np.percentile(u_x, percentiles)
else:
splits = u_x
max_entropy_gain = 0
best_split = None
tt = np.searchsorted(x, splits, side="right")
for i, t in enumerate(tt):
samples_l = t >= self.min_samples_leaf
samples_r = n_x - t >= self.min_samples_leaf
if samples_l and samples_r:
entropy_gain = self._entropy_gain(y, y[:t], y[t:])
if entropy_gain > max_entropy_gain:
max_entropy_gain = entropy_gain
best_split = splits[i]
return best_split
def _entropy(self, x):
n = len(x)
ns1 = np.sum(x)
ns0 = n - ns1
p = np.array([ns0, ns1]) / n
return -special.xlogy(p, p).sum()
def _entropy_gain(self, y, y1, y2):
n = len(y)
n1 = len(y1)
n2 = n - n1
ent_y = self._entropy(y)
ent_y1 = self._entropy(y1)
ent_y2 = self._entropy(y2)
return ent_y - (n1 * ent_y1 + n2 * ent_y2) / n
def _terminate(self, n_x, n_y, y, y1, y2):
splittable = (n_x >= self.min_samples_split) and (n_y >= 2)
n = len(y)
n1 = len(y1)
n2 = n - n1
ent_y = self._entropy(y)
ent_y1 = self._entropy(y1)
ent_y2 = self._entropy(y2)
gain = ent_y - (n1 * ent_y1 + n2 * ent_y2) / n
k = len(np.bincount(y))
k1 = len(np.bincount(y1))
k2 = len(np.bincount(y2))
t0 = np.log(3**k - 2)
t1 = k * ent_y
t2 = k1 * ent_y1
t3 = k2 * ent_y2
delta = t0 - (t1 - t2 - t3)
return gain <= (np.log(n - 1) + delta) / n or not splittable
@property
def splits(self):
"""List of split points
Returns
-------
splits : numpy.ndarray
"""
if not self._is_fitted:
raise NotFittedError("This {} instance is not fitted yet. Call "
"'fit' with appropriate arguments."
.format(self.__class__.__name__))
return np.sort(self._splits)
| (min_samples_split=2, min_samples_leaf=2, max_candidates=32) |
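A minimal MDLP sketch (the module import path follows the package column of this record; the dataset choice is illustrative):

from sklearn.datasets import load_breast_cancer
from optbinning.binning.mdlp import MDLP

data = load_breast_cancer()
x = data.data[:, 0]  # "mean radius", a continuous attribute
y = data.target      # binary 0/1 target, as _entropy assumes

mdlp = MDLP(max_candidates=16)  # fewer candidates -> faster on large data
mdlp.fit(x, y)
print(mdlp.splits)  # sorted numpy array of split points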
4,683 | optbinning.binning.mdlp | __init__ | null | def __init__(self, min_samples_split=2, min_samples_leaf=2,
max_candidates=32):
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.max_candidates = max_candidates
# auxiliary
self._splits = []
self._is_fitted = None
| (self, min_samples_split=2, min_samples_leaf=2, max_candidates=32) |
4,689 | optbinning.binning.mdlp | _entropy | null | def _entropy(self, x):
n = len(x)
ns1 = np.sum(x)
ns0 = n - ns1
p = np.array([ns0, ns1]) / n
return -special.xlogy(p, p).sum()
| (self, x) |
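The helper computes the binary Shannon entropy (in nats) of a 0/1 vector; a quick equivalence check (a sketch, reusing scipy's xlogy as in the source so that p == 0 is handled safely):

import numpy as np
from scipy import special

y = np.array([0, 0, 1, 1, 1])
p1 = y.mean()                      # event proportion: 0.6
p = np.array([1 - p1, p1])
ent = -special.xlogy(p, p).sum()   # -p0*ln(p0) - p1*ln(p1)
assert np.isclose(ent, -(0.4 * np.log(0.4) + 0.6 * np.log(0.6)))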
4,690 | optbinning.binning.mdlp | _entropy_gain | null | def _entropy_gain(self, y, y1, y2):
n = len(y)
n1 = len(y1)
n2 = n - n1
ent_y = self._entropy(y)
ent_y1 = self._entropy(y1)
ent_y2 = self._entropy(y2)
return ent_y - (n1 * ent_y1 + n2 * ent_y2) / n
| (self, y, y1, y2) |
4,691 | optbinning.binning.mdlp | _find_split | null | def _find_split(self, u_x, x, y):
n_x = len(x)
u_x = np.unique(0.5 * (x[1:] + x[:-1])[(y[1:] - y[:-1]) != 0])
if len(u_x) > self.max_candidates:
percentiles = np.linspace(1, 100, self.max_candidates)
splits = np.percentile(u_x, percentiles)
else:
splits = u_x
max_entropy_gain = 0
best_split = None
tt = np.searchsorted(x, splits, side="right")
for i, t in enumerate(tt):
samples_l = t >= self.min_samples_leaf
samples_r = n_x - t >= self.min_samples_leaf
if samples_l and samples_r:
entropy_gain = self._entropy_gain(y, y[:t], y[t:])
if entropy_gain > max_entropy_gain:
max_entropy_gain = entropy_gain
best_split = splits[i]
return best_split
| (self, u_x, x, y) |
4,692 | optbinning.binning.mdlp | _fit | null | def _fit(self, x, y):
_check_parameters(**self.get_params())
x = check_array(x, ensure_2d=False, force_all_finite=True)
y = check_array(y, ensure_2d=False, force_all_finite=True)
idx = np.argsort(x)
x = x[idx]
y = y[idx]
self._recurse(x, y, 0)
self._is_fitted = True
return self
| (self, x, y) |
4,697 | optbinning.binning.mdlp | _recurse | null | def _recurse(self, x, y, id):
u_x = np.unique(x)
n_x = len(u_x)
n_y = len(np.bincount(y))
split = self._find_split(u_x, x, y)
if split is not None:
self._splits.append(split)
t = np.searchsorted(x, split, side="right")
if not self._terminate(n_x, n_y, y, y[:t], y[t:]):
self._recurse(x[:t], y[:t], id + 1)
self._recurse(x[t:], y[t:], id + 2)
| (self, x, y, id) |
4,700 | optbinning.binning.mdlp | _terminate | null | def _terminate(self, n_x, n_y, y, y1, y2):
splittable = (n_x >= self.min_samples_split) and (n_y >= 2)
n = len(y)
n1 = len(y1)
n2 = n - n1
ent_y = self._entropy(y)
ent_y1 = self._entropy(y1)
ent_y2 = self._entropy(y2)
gain = ent_y - (n1 * ent_y1 + n2 * ent_y2) / n
k = len(np.bincount(y))
k1 = len(np.bincount(y1))
k2 = len(np.bincount(y2))
t0 = np.log(3**k - 2)
t1 = k * ent_y
t2 = k1 * ent_y1
t3 = k2 * ent_y2
delta = t0 - (t1 - t2 - t3)
return gain <= (np.log(n - 1) + delta) / n or not splittable
| (self, n_x, n_y, y, y1, y2) |
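_terminate implements the Fayyad-Irani MDLP stopping criterion, written here with natural logarithms as in the code (the original paper uses base-2 logs):

\[
\mathrm{Gain}(T;S) \le \frac{\ln(N-1) + \Delta(T;S)}{N},
\qquad
\Delta(T;S) = \ln\bigl(3^{k}-2\bigr) - \bigl[k\,\mathrm{Ent}(S) - k_1\,\mathrm{Ent}(S_1) - k_2\,\mathrm{Ent}(S_2)\bigr],
\]

where N = len(y), Ent is the entropy helper above, and k, k1, k2 are the class counts (via np.bincount) of y, y1 and y2. Recursion also stops when the partition fails min_samples_split or contains fewer than two classes.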
4,703 | optbinning.binning.mdlp | fit | Fit MDLP discretization algorithm.
Parameters
----------
x : array-like, shape = (n_samples)
Data samples, where n_samples is the number of samples.
y : array-like, shape = (n_samples)
Target vector relative to x.
Returns
-------
self : MDLP
| def fit(self, x, y):
"""Fit MDLP discretization algorithm.
Parameters
----------
x : array-like, shape = (n_samples)
Data samples, where n_samples is the number of samples.
y : array-like, shape = (n_samples)
Target vector relative to x.
Returns
-------
self : MDLP
"""
return self._fit(x, y)
| (self, x, y) |
4,706 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
x : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``x`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.mdlp.MDLP, *, x: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.mdlp.MDLP |
4,708 | optbinning.binning.multiclass_binning | MulticlassOptimalBinning | Optimal binning of a numerical variable with respect to a multiclass or
multilabel target.
**Note that the maximum number of classes is set to 100**. To ease
visualization of the binning table, a set of 100 maximally distinct colors
is generated using the library `glasbey
<https://github.com/taketwo/glasbey>`_.
Parameters
----------
name : str, optional (default="")
The variable name.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecistionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver or "cp" to
choose a constrained programming solver.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str, array-like or None, optional (default="auto")
The **event rate** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend maximizing IV using a machine learning classifier, a list of
monotonic trends combining "auto", "auto_heuristic", "auto_asc_desc",
"ascending", "descending", "concave", "convex", "peak", "valley",
"peak_heuristic", "valley_heuristic" and None, one for each class.
If None, then the monotonic constraint is disabled.
min_event_rate_diff : float, optional (default=0)
The minimum event rate difference between consecutive bins.
.. versionadded:: 0.17.0
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method or "zscore" to use the modified
Z-score method.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points.
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
mip_solver : str, optional (default="bop")
The mixed-integer programming solver. Supported solvers are "bop" to
choose the Google OR-Tools binary optimizer or "cbc" to choose the
COIN-OR Branch-and-Cut solver CBC.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keyword arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity and memory usage. The default values generally produce quality
results, however, some improvement can be achieved by increasing
``max_n_prebins`` and/or decreasing ``min_prebin_size``.
The pre-binning refinement phase guarantees that no prebin has zero
counts of either non-events or events by merging pure prebins. Pure bins
produce infinite WoE and event rates.
| class MulticlassOptimalBinning(OptimalBinning):
"""Optimal binning of a numerical variable with respect to a multiclass or
multilabel target.
**Note that the maximum number of classes is set to 100**. To ease
visualization of the binning table, a set of 100 maximally distinct colors
is generated using the library `glasbey
<https://github.com/taketwo/glasbey>`_.
Parameters
----------
name : str, optional (default="")
The variable name.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecistionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver or "cp" to
choose a constrained programming solver.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str, array-like or None, optional (default="auto")
The **event rate** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend maximizing IV using a machine learning classifier, a list of
monotonic trends combining "auto", "auto_heuristic", "auto_asc_desc",
"ascending", "descending", "concave", "convex", "peak", "valley",
"peak_heuristic", "valley_heuristic" and None, one for each class.
If None, then the monotonic constraint is disabled.
min_event_rate_diff : float, optional (default=0)
The minimum event rate difference between consecutive bins.
.. versionadded:: 0.17.0
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method or "zscore" to use the modified
Z-score method.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points.
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
mip_solver : str, optional (default="bop")
The mixed-integer programming solver. Supported solvers are "bop" to
choose the Google OR-Tools binary optimizer or "cbc" to choose the
COIN-OR Branch-and-Cut solver CBC.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keyword arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity and memory usage. The default values generally produce quality
results, however, some improvement can be achieved by increasing
``max_n_prebins`` and/or decreasing ``min_prebin_size``.
The pre-binning refinement phase guarantees that no prebin has zero
counts of either non-events or events by merging pure prebins. Pure bins
produce infinite WoE and event rates.
"""
def __init__(self, name="", prebinning_method="cart", solver="cp",
max_n_prebins=20, min_prebin_size=0.05,
min_n_bins=None, max_n_bins=None, min_bin_size=None,
max_bin_size=None, monotonic_trend="auto",
min_event_rate_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", outlier_detector=None,
outlier_params=None, user_splits=None, user_splits_fixed=None,
special_codes=None, split_digits=None, mip_solver="bop",
time_limit=100, verbose=False, **prebinning_kwargs):
self.name = name
self.dtype = "numerical"
self.prebinning_method = prebinning_method
self.solver = solver
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend = monotonic_trend
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.outlier_detector = outlier_detector
self.outlier_params = outlier_params
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.mip_solver = mip_solver
self.time_limit = time_limit
self.verbose = verbose
self.prebinning_kwargs = prebinning_kwargs
# auxiliary
self._n_event = None
self._n_event_missing = None
self._n_event_special = None
self._problem_type = "classification"
# info
self._binning_table = None
self._classes = None
self._n_classes = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
def fit(self, x, y, check_input=False):
"""Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : MulticlassOptimalBinning
Fitted optimal binning.
"""
return self._fit(x, y, check_input)
def fit_transform(self, x, y, metric="mean_woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric : str, optional (default="mean_woe")
The metric used to transform the input vector. Supported metrics
are "mean_woe" to choose the mean of Weight of Evidence (WoE),
"weighted_mean_woe" to choose weighted mean of WoE using the
number of records per class as weights, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
def transform(self, x, metric="mean_woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to mean Weight of Evidence (WoE) or weighted
mean WoE using bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str, optional (default="mean_woe")
The metric used to transform the input vector. Supported metrics
are "mean_woe" to choose the mean of Weight of Evidence (WoE),
"weighted_mean_woe" to choose weighted mean of WoE using the
number of records per class as weights, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_multiclass_target(self._splits_optimal, x,
self._n_event, self.special_codes,
metric, metric_special,
metric_missing, show_digits,
check_input)
def _fit(self, x, y, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
_, _, _, _, _, _, _] = split_data(
self.dtype, x, y, special_codes=self.special_codes,
check_input=check_input, outlier_detector=self.outlier_detector,
outlier_params=self.outlier_params)
# Check that x_clean is numerical
if x_clean.dtype == np.dtype("object"):
raise ValueError("x array after removing special codes and "
"missing values must be numerical.")
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples-(n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
n_splits = len(self.user_splits)
if self.verbose:
logger.info("Pre-binning: user splits supplied: {}"
.format(n_splits))
user_splits = check_array(self.user_splits, ensure_2d=False,
dtype=None, force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
splits, n_nonevent, n_event = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, x_special, y_special,
None)
else:
splits, n_nonevent, n_event = self._fit_prebinning(
x_clean, y_clean, y_missing, x_special, y_special, None)
self._n_prebins = len(n_nonevent)
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning: number of refinements: {}"
.format(self._n_refinements))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_nonevent, n_event)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
n_event = np.empty(self._n_classes, dtype=np.int64)
for i, cl in enumerate(self._classes):
n_event[i] = target_info(y_clean, cl)[0]
self._n_event = multiclass_bin_info(
self._solution, self._n_classes, n_event, self._n_event_missing,
self._n_event_special)
self._binning_table = MulticlassBinningTable(
self.name, self.special_codes, self._splits_optimal, self._n_event,
self._classes)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
x_special, y_special, y_others=None,
sw_clean=None, sw_missing=None, sw_special=None,
sw_others=None):
self._classes = np.unique(y)
self._n_classes = len(self._classes)
if self._n_classes > 100:
raise ValueError("Maximum number of classes exceeded; got {}."
.format(self._n_classes))
self._n_event_special = target_info_special_multiclass(
self.special_codes, x_special, y_special, self._classes)
self._n_event_missing = [target_info(y_missing, cl)[0]
for cl in self._classes]
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
splits_prebinning, n_nonevent, n_event = self._compute_prebins(
splits_prebinning, x, y)
return splits_prebinning, n_nonevent, n_event
def _fit_optimizer(self, splits, n_nonevent, n_event):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if not len(n_nonevent):
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits), dtype=bool)
if self.verbose:
logger.warning("Optimizer: no bins after pre-binning.")
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Monotonic trend
trend_changes = [None] * self._n_classes
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = [auto_monotonic(n_nonevent[:, i], n_event[:, i],
self.monotonic_trend)
for i in range(len(self._classes))]
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trends.".format(monotonic))
elif isinstance(self.monotonic_trend, list):
if len(self.monotonic_trend) != self._n_classes:
raise ValueError("List of monotonic trends must be of size "
"n_classes.")
monotonic = []
for i, m_trend in enumerate(self.monotonic_trend):
if m_trend in auto_monotonic_modes:
trend = auto_monotonic(n_nonevent[:, i], n_event[:, i],
m_trend)
if m_trend == "auto_heuristic":
if trend in ("peak", "valley"):
if trend == "peak":
trend = "peak_heuristic"
else:
trend = "valley_heuristic"
event_rate = n_event[:, i] / (n_nonevent[:, i] +
n_event[:, i])
trend_change = peak_valley_trend_change_heuristic(
event_rate, trend)
trend_changes[i] = trend_change
monotonic.append(trend)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(trend))
else:
monotonic.append(m_trend)
elif self.monotonic_trend is None:
monotonic = [None] * self._n_classes
if self.verbose:
logger.info("Optimizer: monotonic trend not set.")
if self.solver == "cp":
optimizer = MulticlassBinningCP(monotonic, self.min_n_bins,
self.max_n_bins, min_bin_size,
max_bin_size,
self.min_event_rate_diff,
self.max_pvalue,
self.max_pvalue_policy,
self.user_splits_fixed,
self.time_limit)
else:
optimizer = MulticlassBinningMIP(monotonic, self.min_n_bins,
self.max_n_bins, min_bin_size,
max_bin_size,
self.min_event_rate_diff,
self.max_pvalue,
self.max_pvalue_policy,
self.mip_solver,
self.user_splits_fixed,
self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_nonevent, n_event, trend_changes)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
def _compute_prebins(self, splits_prebinning, x, y):
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
indices = np.digitize(x, splits_prebinning, right=False)
n_bins = n_splits + 1
n_nonevent = np.empty((n_bins, self._n_classes), dtype=np.int64)
n_event = np.empty((n_bins, self._n_classes), dtype=np.int64)
mask_remove = np.zeros(n_bins, dtype=bool)
for idx, cl in enumerate(self._classes):
y1 = (y == cl)
y0 = ~y1
for i in range(n_bins):
mask = (indices == i)
n_nonevent[i, idx] = np.count_nonzero(y0 & mask)
n_event[i, idx] = np.count_nonzero(y1 & mask)
mask_remove |= (n_nonevent[:, idx] == 0) | (n_event[:, idx] == 0)
if np.any(mask_remove):
self._n_refinements += 1
mask_splits = np.concatenate(
[mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
if self.user_splits_fixed is not None:
user_splits_fixed = np.asarray(self.user_splits_fixed)
user_splits = np.asarray(self.user_splits)
fixed_remove = user_splits_fixed & mask_splits
if any(fixed_remove):
raise ValueError("Fixed user_splits {} are removed "
"because produce pure prebins. Provide "
"different splits to be fixed."
.format(user_splits[fixed_remove]))
# Update boolean array of fixed user splits.
self.user_splits_fixed = user_splits_fixed[~mask_splits]
self.user_splits = user_splits[~mask_splits]
splits = splits_prebinning[~mask_splits]
if self.verbose:
logger.info("Pre-binning: number prebins removed: {}"
.format(np.count_nonzero(mask_remove)))
[splits_prebinning, n_nonevent, n_event] = self._compute_prebins(
splits, x, y)
return splits_prebinning, n_nonevent, n_event
@property
def binning_table(self):
"""Return an instantiated binning table. Please refer to
:ref:`Binning table: multiclass target`.
Returns
-------
binning_table : MulticlassBinningTable.
"""
self._check_is_fitted()
return self._binning_table
@property
def classes(self):
"""List of classes.
Returns
-------
classes : numpy.ndarray
"""
self._check_is_fitted()
return self._classes
@property
def splits(self):
"""List of optimal split points.
Returns
-------
splits : numpy.ndarray
"""
self._check_is_fitted()
return self._splits_optimal
def to_json(self, path):
"""
        Save optimal bins and/or split points and transformation depending on
        the target type.
Parameters
----------
        path: The path where the JSON file will be saved.
"""
if path is None:
raise ValueError('Specify the path for the json file.')
table = self.binning_table
opt_bin_dict = dict()
opt_bin_dict['name'] = table.name
opt_bin_dict['special_codes'] = table.special_codes
opt_bin_dict['splits'] = table.splits.tolist()
opt_bin_dict['n_event'] = table.n_event.tolist()
opt_bin_dict['classes'] = table.classes.tolist()
with open(path, "w") as write_file:
json.dump(opt_bin_dict, write_file)
def read_json(self, path):
"""
        Read a JSON file containing split points and set them as the new split
        points.
Parameters
----------
        path: The path of the JSON file.
"""
if path is None:
raise ValueError('Specify the path for the json file.')
self._is_fitted = True
with open(path, "r") as read_file:
multi_table_attr = json.load(read_file)
for key in multi_table_attr.keys():
if isinstance(multi_table_attr[key], list):
multi_table_attr[key] = np.array(multi_table_attr[key])
self._binning_table = MulticlassBinningTable(**multi_table_attr)
| (name='', prebinning_method='cart', solver='cp', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend='auto', min_event_rate_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', outlier_detector=None, outlier_params=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, mip_solver='bop', time_limit=100, verbose=False, **prebinning_kwargs) |
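For orientation, a minimal usage sketch of this class; the wine dataset and the variable names below are illustrative assumptions, not part of the library code above.

# Minimal usage sketch (illustrative data assumed).
from sklearn.datasets import load_wine
from optbinning import MulticlassOptimalBinning

data = load_wine()
x = data.data[:, 0]          # first numerical feature ("alcohol")
y = data.target              # three classes: 0, 1, 2

optb = MulticlassOptimalBinning(name="alcohol", solver="cp")
optb.fit(x, y)
print(optb.classes)          # array([0, 1, 2])
print(optb.splits)           # optimal split points
optb.binning_table.build()   # per-bin, per-class summary table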
4,710 | optbinning.binning.multiclass_binning | __init__ | null | def __init__(self, name="", prebinning_method="cart", solver="cp",
max_n_prebins=20, min_prebin_size=0.05,
min_n_bins=None, max_n_bins=None, min_bin_size=None,
max_bin_size=None, monotonic_trend="auto",
min_event_rate_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", outlier_detector=None,
outlier_params=None, user_splits=None, user_splits_fixed=None,
special_codes=None, split_digits=None, mip_solver="bop",
time_limit=100, verbose=False, **prebinning_kwargs):
self.name = name
self.dtype = "numerical"
self.prebinning_method = prebinning_method
self.solver = solver
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend = monotonic_trend
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.outlier_detector = outlier_detector
self.outlier_params = outlier_params
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.mip_solver = mip_solver
self.time_limit = time_limit
self.verbose = verbose
self.prebinning_kwargs = prebinning_kwargs
# auxiliary
self._n_event = None
self._n_event_missing = None
self._n_event_special = None
self._problem_type = "classification"
# info
self._binning_table = None
self._classes = None
self._n_classes = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
| (self, name='', prebinning_method='cart', solver='cp', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, monotonic_trend='auto', min_event_rate_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', outlier_detector=None, outlier_params=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, mip_solver='bop', time_limit=100, verbose=False, **prebinning_kwargs) |
4,717 | optbinning.binning.multiclass_binning | _compute_prebins | null | def _compute_prebins(self, splits_prebinning, x, y):
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
indices = np.digitize(x, splits_prebinning, right=False)
n_bins = n_splits + 1
n_nonevent = np.empty((n_bins, self._n_classes), dtype=np.int64)
n_event = np.empty((n_bins, self._n_classes), dtype=np.int64)
mask_remove = np.zeros(n_bins, dtype=bool)
for idx, cl in enumerate(self._classes):
y1 = (y == cl)
y0 = ~y1
for i in range(n_bins):
mask = (indices == i)
n_nonevent[i, idx] = np.count_nonzero(y0 & mask)
n_event[i, idx] = np.count_nonzero(y1 & mask)
mask_remove |= (n_nonevent[:, idx] == 0) | (n_event[:, idx] == 0)
if np.any(mask_remove):
self._n_refinements += 1
mask_splits = np.concatenate(
[mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
if self.user_splits_fixed is not None:
user_splits_fixed = np.asarray(self.user_splits_fixed)
user_splits = np.asarray(self.user_splits)
fixed_remove = user_splits_fixed & mask_splits
if any(fixed_remove):
raise ValueError("Fixed user_splits {} are removed "
"because produce pure prebins. Provide "
"different splits to be fixed."
.format(user_splits[fixed_remove]))
# Update boolean array of fixed user splits.
self.user_splits_fixed = user_splits_fixed[~mask_splits]
self.user_splits = user_splits[~mask_splits]
splits = splits_prebinning[~mask_splits]
if self.verbose:
logger.info("Pre-binning: number prebins removed: {}"
.format(np.count_nonzero(mask_remove)))
[splits_prebinning, n_nonevent, n_event] = self._compute_prebins(
splits, x, y)
return splits_prebinning, n_nonevent, n_event
| (self, splits_prebinning, x, y) |
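To make the counting logic above concrete, a tiny self-contained sketch of the np.digitize indexing that _compute_prebins builds on, using toy values:

import numpy as np

# Two splits define three prebins: (-inf, 2), [2, 4), [4, inf).
x = np.array([1.0, 3.0, 3.5, 5.0])
splits = np.array([2.0, 4.0])
indices = np.digitize(x, splits, right=False)   # -> [0, 1, 1, 2]
for i in range(len(splits) + 1):
    print("prebin", i, "count", np.count_nonzero(indices == i))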
4,718 | optbinning.binning.multiclass_binning | _fit | null | def _fit(self, x, y, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
_, _, _, _, _, _, _] = split_data(
self.dtype, x, y, special_codes=self.special_codes,
check_input=check_input, outlier_detector=self.outlier_detector,
outlier_params=self.outlier_params)
# Check that x_clean is numerical
if x_clean.dtype == np.dtype("object"):
raise ValueError("x array after removing special codes and "
"missing values must be numerical.")
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples-(n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
n_splits = len(self.user_splits)
if self.verbose:
logger.info("Pre-binning: user splits supplied: {}"
.format(n_splits))
user_splits = check_array(self.user_splits, ensure_2d=False,
dtype=None, force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
splits, n_nonevent, n_event = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, x_special, y_special,
None)
else:
splits, n_nonevent, n_event = self._fit_prebinning(
x_clean, y_clean, y_missing, x_special, y_special, None)
self._n_prebins = len(n_nonevent)
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning: number of refinements: {}"
.format(self._n_refinements))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_nonevent, n_event)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
n_event = np.empty(self._n_classes, dtype=np.int64)
for i, cl in enumerate(self._classes):
n_event[i] = target_info(y_clean, cl)[0]
self._n_event = multiclass_bin_info(
self._solution, self._n_classes, n_event, self._n_event_missing,
self._n_event_special)
self._binning_table = MulticlassBinningTable(
self.name, self.special_codes, self._splits_optimal, self._n_event,
self._classes)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, x, y, check_input) |
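The stages timed by _fit can be inspected after fitting. A hedged sketch, continuing the wine example above and assuming the information method is available on this class as on other optbinning estimators:

# Verbose fitting logs each stage (pre-processing, pre-binning,
# optimization, post-processing); information() summarizes timings.
optb = MulticlassOptimalBinning(name="alcohol", verbose=True)
optb.fit(x, y)
optb.information(print_level=2)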
4,719 | optbinning.binning.multiclass_binning | _fit_optimizer | null | def _fit_optimizer(self, splits, n_nonevent, n_event):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if not len(n_nonevent):
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits), dtype=bool)
if self.verbose:
logger.warning("Optimizer: no bins after pre-binning.")
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Monotonic trend
trend_changes = [None] * self._n_classes
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = [auto_monotonic(n_nonevent[:, i], n_event[:, i],
self.monotonic_trend)
for i in range(len(self._classes))]
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trends.".format(monotonic))
elif isinstance(self.monotonic_trend, list):
if len(self.monotonic_trend) != self._n_classes:
raise ValueError("List of monotonic trends must be of size "
"n_classes.")
monotonic = []
for i, m_trend in enumerate(self.monotonic_trend):
if m_trend in auto_monotonic_modes:
trend = auto_monotonic(n_nonevent[:, i], n_event[:, i],
m_trend)
if m_trend == "auto_heuristic":
if trend in ("peak", "valley"):
if trend == "peak":
trend = "peak_heuristic"
else:
trend = "valley_heuristic"
event_rate = n_event[:, i] / (n_nonevent[:, i] +
n_event[:, i])
trend_change = peak_valley_trend_change_heuristic(
event_rate, trend)
trend_changes[i] = trend_change
monotonic.append(trend)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(trend))
else:
monotonic.append(m_trend)
elif self.monotonic_trend is None:
monotonic = [None] * self._n_classes
if self.verbose:
logger.info("Optimizer: monotonic trend not set.")
if self.solver == "cp":
optimizer = MulticlassBinningCP(monotonic, self.min_n_bins,
self.max_n_bins, min_bin_size,
max_bin_size,
self.min_event_rate_diff,
self.max_pvalue,
self.max_pvalue_policy,
self.user_splits_fixed,
self.time_limit)
else:
optimizer = MulticlassBinningMIP(monotonic, self.min_n_bins,
self.max_n_bins, min_bin_size,
max_bin_size,
self.min_event_rate_diff,
self.max_pvalue,
self.max_pvalue_policy,
self.mip_solver,
self.user_splits_fixed,
self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(n_nonevent, n_event, trend_changes)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
| (self, splits, n_nonevent, n_event) |
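As the list branch above shows, monotonic_trend may also be given per class. A sketch, assuming three target classes as in the earlier example:

# One monotonic trend per class; the list length must equal n_classes.
optb = MulticlassOptimalBinning(
    monotonic_trend=["ascending", "auto_heuristic", None], solver="cp")
optb.fit(x, y)
# "auto_heuristic" entries may resolve to "peak_heuristic" or
# "valley_heuristic", with the trend change point estimated heuristically.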
4,725 | optbinning.binning.multiclass_binning | _prebinning_refinement | null | def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
x_special, y_special, y_others=None,
sw_clean=None, sw_missing=None, sw_special=None,
sw_others=None):
self._classes = np.unique(y)
self._n_classes = len(self._classes)
if self._n_classes > 100:
raise ValueError("Maximum number of classes exceeded; got {}."
.format(self._n_classes))
self._n_event_special = target_info_special_multiclass(
self.special_codes, x_special, y_special, self._classes)
self._n_event_missing = [target_info(y_missing, cl)[0]
for cl in self._classes]
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
splits_prebinning, n_nonevent, n_event = self._compute_prebins(
splits_prebinning, x, y)
return splits_prebinning, n_nonevent, n_event
| (self, splits_prebinning, x, y, y_missing, x_special, y_special, y_others=None, sw_clean=None, sw_missing=None, sw_special=None, sw_others=None) |
4,730 | optbinning.binning.multiclass_binning | fit | Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : MulticlassOptimalBinning
Fitted optimal binning.
| def fit(self, x, y, check_input=False):
"""Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : MulticlassOptimalBinning
Fitted optimal binning.
"""
return self._fit(x, y, check_input)
| (self, x, y, check_input=False) |
4,731 | optbinning.binning.multiclass_binning | fit_transform | Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric : str, optional (default="mean_woe")
The metric used to transform the input vector. Supported metrics
are "mean_woe" to choose the mean of Weight of Evidence (WoE),
"weighted_mean_woe" to choose weighted mean of WoE using the
number of records per class as weights, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def fit_transform(self, x, y, metric="mean_woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric : str, optional (default="mean_woe")
The metric used to transform the input vector. Supported metrics
are "mean_woe" to choose the mean of Weight of Evidence (WoE),
"weighted_mean_woe" to choose weighted mean of WoE using the
number of records per class as weights, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
| (self, x, y, metric='mean_woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
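A short sketch of the supported transform metrics, continuing the hypothetical example above:

# The same fitted bins expressed with different metrics.
x_mean_woe = optb.fit_transform(x, y, metric="mean_woe")
x_indices = optb.transform(x, metric="indices")            # integer bin indices
x_bins = optb.transform(x, metric="bins", show_digits=3)   # interval strings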
4,735 | optbinning.binning.multiclass_binning | read_json |
Read a JSON file containing split points and set them as the new split
points.
Parameters
----------
path: The path of the JSON file.
| def read_json(self, path):
"""
        Read a JSON file containing split points and set them as the new split
        points.
Parameters
----------
        path: The path of the JSON file.
"""
if path is None:
raise ValueError('Specify the path for the json file.')
self._is_fitted = True
with open(path, "r") as read_file:
multi_table_attr = json.load(read_file)
for key in multi_table_attr.keys():
if isinstance(multi_table_attr[key], list):
multi_table_attr[key] = np.array(multi_table_attr[key])
self._binning_table = MulticlassBinningTable(**multi_table_attr)
| (self, path) |
4,736 | sklearn.utils._metadata_requests | set_fit_request | Request metadata passed to the ``fit`` method.
Note that this method is only relevant if
``enable_metadata_routing=True`` (see :func:`sklearn.set_config`).
Please see :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
The options for each parameter are:
- ``True``: metadata is requested, and passed to ``fit`` if provided. The request is ignored if metadata is not provided.
- ``False``: metadata is not requested and the meta-estimator will not pass it to ``fit``.
- ``None``: metadata is not requested, and the meta-estimator will raise an error if the user provides it.
- ``str``: metadata should be passed to the meta-estimator with this given alias instead of the original name.
The default (``sklearn.utils.metadata_routing.UNCHANGED``) retains the
existing request. This allows you to change the request for some
parameters and not others.
.. versionadded:: 1.3
.. note::
This method is only relevant if this estimator is used as a
sub-estimator of a meta-estimator, e.g. used inside a
:class:`~sklearn.pipeline.Pipeline`. Otherwise it has no effect.
Parameters
----------
check_input : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``check_input`` parameter in ``fit``.
x : str, True, False, or None, default=sklearn.utils.metadata_routing.UNCHANGED
Metadata routing for ``x`` parameter in ``fit``.
Returns
-------
self : object
The updated object.
| def __get__(self, instance, owner):
# we would want to have a method which accepts only the expected args
def func(**kw):
"""Updates the request for provided parameters
This docstring is overwritten below.
See REQUESTER_DOC for expected functionality
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is enabled."
" You can enable it using"
" sklearn.set_config(enable_metadata_routing=True)."
)
if self.validate_keys and (set(kw) - set(self.keys)):
raise TypeError(
f"Unexpected args: {set(kw) - set(self.keys)}. Accepted arguments"
f" are: {set(self.keys)}"
)
requests = instance._get_metadata_request()
method_metadata_request = getattr(requests, self.name)
for prop, alias in kw.items():
if alias is not UNCHANGED:
method_metadata_request.add_request(param=prop, alias=alias)
instance._metadata_request = requests
return instance
# Now we set the relevant attributes of the function so that it seems
# like a normal method to the end user, with known expected arguments.
func.__name__ = f"set_{self.name}_request"
params = [
inspect.Parameter(
name="self",
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
annotation=owner,
)
]
params.extend(
[
inspect.Parameter(
k,
inspect.Parameter.KEYWORD_ONLY,
default=UNCHANGED,
annotation=Optional[Union[bool, None, str]],
)
for k in self.keys
]
)
func.__signature__ = inspect.Signature(
params,
return_annotation=owner,
)
doc = REQUESTER_DOC.format(method=self.name)
for metadata in self.keys:
doc += REQUESTER_DOC_PARAM.format(metadata=metadata, method=self.name)
doc += REQUESTER_DOC_RETURN
func.__doc__ = doc
return func
| (self: optbinning.binning.multiclass_binning.MulticlassOptimalBinning, *, check_input: Union[bool, NoneType, str] = '$UNCHANGED$', x: Union[bool, NoneType, str] = '$UNCHANGED$') -> optbinning.binning.multiclass_binning.MulticlassOptimalBinning |
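A hedged sketch of how set_fit_request is used; this follows the generic scikit-learn >= 1.3 metadata-routing pattern and is only relevant when the estimator sits inside a meta-estimator such as a Pipeline:

# Enable metadata routing, then declare that check_input metadata
# should be routed to fit() by a wrapping meta-estimator.
import sklearn
from optbinning import MulticlassOptimalBinning

sklearn.set_config(enable_metadata_routing=True)
optb = MulticlassOptimalBinning().set_fit_request(check_input=True)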
4,739 | optbinning.binning.multiclass_binning | to_json |
Save optimal bins and/or split points and transformation depending on
the target type.
Parameters
----------
path: The path where the JSON file will be saved.
| def to_json(self, path):
"""
        Save optimal bins and/or split points and transformation depending on
        the target type.
Parameters
----------
        path: The path where the JSON file will be saved.
"""
if path is None:
raise ValueError('Specify the path for the json file.')
table = self.binning_table
opt_bin_dict = dict()
opt_bin_dict['name'] = table.name
opt_bin_dict['special_codes'] = table.special_codes
opt_bin_dict['splits'] = table.splits.tolist()
opt_bin_dict['n_event'] = table.n_event.tolist()
opt_bin_dict['classes'] = table.classes.tolist()
with open(path, "w") as write_file:
json.dump(opt_bin_dict, write_file)
| (self, path) |
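A round-trip sketch for to_json/read_json. Per the code above, read_json restores only the binning table, so the reloaded instance can rebuild the table but not transform data:

# Persist the fitted bins, then rebuild a table-only instance.
optb.to_json("multiclass_bins.json")           # hypothetical file name

optb_new = MulticlassOptimalBinning()
optb_new.read_json("multiclass_bins.json")
optb_new.binning_table.build()                 # works; transform state is absent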
4,740 | optbinning.binning.multiclass_binning | transform | Transform given data to mean Weight of Evidence (WoE) or weighted
mean WoE using bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str, optional (default="mean_woe")
The metric used to transform the input vector. Supported metrics
are "mean_woe" to choose the mean of Weight of Evidence (WoE),
"weighted_mean_woe" to choose weighted mean of WoE using the
number of records per class as weights, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
| def transform(self, x, metric="mean_woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to mean Weight of Evidence (WoE) or weighted
mean WoE using bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str, optional (default="mean_woe")
The metric used to transform the input vector. Supported metrics
are "mean_woe" to choose the mean of Weight of Evidence (WoE),
"weighted_mean_woe" to choose weighted mean of WoE using the
number of records per class as weights, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean WoE
or weighted mean WoE, and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
self._check_is_fitted()
return transform_multiclass_target(self._splits_optimal, x,
self._n_event, self.special_codes,
metric, metric_special,
metric_missing, show_digits,
check_input)
| (self, x, metric='mean_woe', metric_special=0, metric_missing=0, show_digits=2, check_input=False) |
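One more transform sketch, showing the metric_special/metric_missing overrides described above (assumes the binning was fitted with special_codes supplied):

# Map special codes and missing values to the empirical mean WoE
# instead of the default constant 0.
x_t = optb.transform(x, metric="mean_woe",
                     metric_special="empirical",
                     metric_missing="empirical")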
4,741 | optbinning.binning.binning | OptimalBinning | Optimal binning of a numerical or categorical variable with respect to a
binary target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "mdlp" for Minimum Description Length Principle (MDLP),
"quantile" to generate prebins with approximately same frequency and
"uniform" to generate prebins with equal width. Method "cart" uses
`sklearn.tree.DecisionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver, "cp" to choose
a constrained programming solver or "ls" to choose `LocalSolver
<https://www.localsolver.com/>`_.
divergence : str, optional (default="iv")
The divergence measure in the objective function to be maximized.
Supported divergences are "iv" (Information Value or Jeffrey's
divergence), "js" (Jensen-Shannon), "hellinger" (Hellinger divergence)
and "triangular" (triangular discrimination).
.. versionadded:: 0.7.0
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
    The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
min_bin_n_nonevent : int or None, optional (default=None)
The minimum number of non-event records for each bin. If None,
``min_bin_n_nonevent = 1``.
max_bin_n_nonevent : int or None, optional (default=None)
The maximum number of non-event records for each bin. If None, then an
unlimited number of non-event records for each bin.
min_bin_n_event : int or None, optional (default=None)
The minimum number of event records for each bin. If None,
``min_bin_n_event = 1``.
max_bin_n_event : int or None, optional (default=None)
The maximum number of event records for each bin. If None, then an
unlimited number of event records for each bin.
monotonic_trend : str or None, optional (default="auto")
The **event rate** monotonic trend. Supported trends are “auto”,
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend maximizing IV using a machine learning classifier, "ascending",
"descending", "concave", "convex", "peak" and "peak_heuristic" to allow
a peak change point, and "valley" and "valley_heuristic" to allow a
valley change point. Trends "auto_heuristic", "peak_heuristic" and
"valley_heuristic" use a heuristic to determine the change point,
and are significantly faster for large size instances (``max_n_prebins
> 20``). Trend "auto_asc_desc" is used to automatically select the best
monotonic trend between "ascending" and "descending". If None, then the
monotonic constraint is disabled.
min_event_rate_diff : float, optional (default=0)
    The minimum event rate difference between consecutive bins. For solver
"ls", this option currently only applies when monotonic_trend is
“ascending”, “descending”, “peak_heuristic” or “valley_heuristic”.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint. Option supported by solvers
"cp" and "mip".
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization. Option supported by solvers
"cp" and "mip".
.. versionadded:: 0.3.0
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
    the interquartile range based method or "zscore" to use the modified
Z-score method.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
class_weight : dict, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. Check
`sklearn.tree.DecisionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
cat_cutoff : float or None, optional (default=None)
Generate bin others with categories in which the fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
cat_unknown : float, str or None (default=None)
    The value assigned to categories not observed during training but
    occurring during transform.
    If None, the value assigned to an unknown category follows this rule:
- if transform metric == 'woe' then woe(mean event rate) = 0
- if transform metric == 'event_rate' then mean event rate
- if transform metric == 'indices' then -1
- if transform metric == 'bins' then 'unknown'
.. versionadded:: 0.17.1
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
.. versionadded:: 0.5.0
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
mip_solver : str, optional (default="bop")
The mixed-integer programming solver. Supported solvers are "bop" to
choose the Google OR-Tools binary optimizer or "cbc" to choose the
COIN-OR Branch-and-Cut solver CBC.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keyword arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity and memory usage. The default values generally produce quality
results, however, some improvement can be achieved by increasing
``max_n_prebins`` and/or decreasing ``min_prebin_size``. A parameter value
``max_n_prebins`` greater than 100 is only recommended if ``solver="ls"``.
The pre-binning refinement phase guarantees that no prebin has zero counts
of either non-events or events, by merging those pure prebins. Pure bins
produce infinite WoE and IV measures.
The mathematical formulation when ``solver="ls"`` does **not** currently
support the ``max_pvalue`` constraint.
| class OptimalBinning(BaseOptimalBinning):
"""Optimal binning of a numerical or categorical variable with respect to a
binary target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "mdlp" for Minimum Description Length Principle (MDLP),
"quantile" to generate prebins with approximately same frequency and
"uniform" to generate prebins with equal width. Method "cart" uses
`sklearn.tree.DecisionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
solver : str, optional (default="cp")
The optimizer to solve the optimal binning problem. Supported solvers
are "mip" to choose a mixed-integer programming solver, "cp" to choose
a constrained programming solver or "ls" to choose `LocalSolver
<https://www.localsolver.com/>`_.
divergence : str, optional (default="iv")
The divergence measure in the objective function to be maximized.
Supported divergences are "iv" (Information Value or Jeffrey's
divergence), "js" (Jensen-Shannon), "hellinger" (Hellinger divergence)
and "triangular" (triangular discrimination).
.. versionadded:: 0.7.0
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
        The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
min_bin_n_nonevent : int or None, optional (default=None)
The minimum number of non-event records for each bin. If None,
``min_bin_n_nonevent = 1``.
max_bin_n_nonevent : int or None, optional (default=None)
The maximum number of non-event records for each bin. If None, then an
unlimited number of non-event records for each bin.
min_bin_n_event : int or None, optional (default=None)
The minimum number of event records for each bin. If None,
``min_bin_n_event = 1``.
max_bin_n_event : int or None, optional (default=None)
The maximum number of event records for each bin. If None, then an
unlimited number of event records for each bin.
monotonic_trend : str or None, optional (default="auto")
The **event rate** monotonic trend. Supported trends are “auto”,
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend maximizing IV using a machine learning classifier, "ascending",
"descending", "concave", "convex", "peak" and "peak_heuristic" to allow
a peak change point, and "valley" and "valley_heuristic" to allow a
valley change point. Trends "auto_heuristic", "peak_heuristic" and
"valley_heuristic" use a heuristic to determine the change point,
and are significantly faster for large size instances (``max_n_prebins
> 20``). Trend "auto_asc_desc" is used to automatically select the best
monotonic trend between "ascending" and "descending". If None, then the
monotonic constraint is disabled.
min_event_rate_diff : float, optional (default=0)
        The minimum event rate difference between consecutive bins. For solver
"ls", this option currently only applies when monotonic_trend is
“ascending”, “descending”, “peak_heuristic” or “valley_heuristic”.
max_pvalue : float or None, optional (default=None)
The maximum p-value among bins. The Z-test is used to detect bins
not satisfying the p-value constraint. Option supported by solvers
"cp" and "mip".
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
gamma : float, optional (default=0)
Regularization strength to reduce the number of dominating bins. Larger
values specify stronger regularization. Option supported by solvers
"cp" and "mip".
.. versionadded:: 0.3.0
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
        the interquartile range based method or "zscore" to use the modified
Z-score method.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
class_weight : dict, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. Check
`sklearn.tree.DecisionTreeClassifier
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeClassifier.html>`_.
cat_cutoff : float or None, optional (default=None)
Generate bin others with categories in which the fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
cat_unknown : float, str or None (default=None)
        The value assigned to categories not observed during training but
        occurring during transform.
        If None, the value assigned to an unknown category follows this rule:
- if transform metric == 'woe' then woe(mean event rate) = 0
- if transform metric == 'event_rate' then mean event rate
- if transform metric == 'indices' then -1
- if transform metric == 'bins' then 'unknown'
.. versionadded:: 0.17.1
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
.. versionadded:: 0.5.0
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
mip_solver : str, optional (default="bop")
The mixed-integer programming solver. Supported solvers are "bop" to
choose the Google OR-Tools binary optimizer or "cbc" to choose the
COIN-OR Branch-and-Cut solver CBC.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keyword arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity and memory usage. The default values generally produce quality
results, however, some improvement can be achieved by increasing
``max_n_prebins`` and/or decreasing ``min_prebin_size``. A parameter value
``max_n_prebins`` greater than 100 is only recommended if ``solver="ls"``.
    The pre-binning refinement phase guarantees that no prebin has zero
    counts of either non-events or events, by merging those pure prebins.
    Pure bins produce infinite WoE and IV measures.
The mathematical formulation when ``solver="ls"`` does **not** currently
support the ``max_pvalue`` constraint.
"""
def __init__(self, name="", dtype="numerical", prebinning_method="cart",
solver="cp", divergence="iv", max_n_prebins=20,
min_prebin_size=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None,
max_bin_n_nonevent=None, min_bin_n_event=None,
max_bin_n_event=None, monotonic_trend="auto",
min_event_rate_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", gamma=0,
outlier_detector=None, outlier_params=None, class_weight=None,
cat_cutoff=None, cat_unknown=None, user_splits=None,
user_splits_fixed=None, special_codes=None, split_digits=None,
mip_solver="bop", time_limit=100, verbose=False,
**prebinning_kwargs):
self.name = name
self.dtype = dtype
self.prebinning_method = prebinning_method
self.solver = solver
self.divergence = divergence
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.min_bin_n_event = min_bin_n_event
self.max_bin_n_event = max_bin_n_event
self.min_bin_n_nonevent = min_bin_n_nonevent
self.max_bin_n_nonevent = max_bin_n_nonevent
self.monotonic_trend = monotonic_trend
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.gamma = gamma
self.outlier_detector = outlier_detector
self.outlier_params = outlier_params
self.class_weight = class_weight
self.cat_cutoff = cat_cutoff
self.cat_unknown = cat_unknown
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.mip_solver = mip_solver
self.time_limit = time_limit
self.verbose = verbose
self.prebinning_kwargs = prebinning_kwargs
# auxiliary
self._flag_min_n_event_nonevent = False
self._categories = None
self._cat_others = None
self._n_event = None
self._n_nonevent = None
self._n_nonevent_missing = None
self._n_event_missing = None
self._n_nonevent_special = None
self._n_event_special = None
self._n_nonevent_cat_others = None
self._n_event_cat_others = None
self._problem_type = "classification"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._solution = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
def fit(self, x, y, sample_weight=None, check_input=False):
"""Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : OptimalBinning
Fitted optimal binning.
"""
return self._fit(x, y, sample_weight, check_input)
def fit_transform(self, x, y, sample_weight=None, metric="woe",
metric_special=0, metric_missing=0, show_digits=2,
check_input=False):
"""Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
sample_weight : array-like of shape (n_samples,) (default=None)
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Only applied if ``prebinning_method="cart"``.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate, and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, sample_weight, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
def transform(self, x, metric="woe", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Transform given data to Weight of Evidence (WoE) or event rate using
bins from the fitted optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="woe")
The metric used to transform the input vector. Supported metrics
are "woe" to choose the Weight of Evidence, "event_rate" to
choose the event rate, "indices" to assign the corresponding
indices of the bins and "bins" to assign the corresponding
bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical WoE or
event rate and any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
        Transformation of data including categories not present during
        training returns zero WoE or event rate.
"""
self._check_is_fitted()
return transform_binary_target(self._splits_optimal, self.dtype, x,
self._n_nonevent, self._n_event,
self.special_codes, self._categories,
self._cat_others, self.cat_unknown,
metric, metric_special, metric_missing,
self.user_splits, show_digits,
check_input)
def information(self, print_level=1):
"""Print overview information about the options settings, problem
statistics, and the solution of the computation.
Parameters
----------
print_level : int (default=1)
Level of details.
"""
self._check_is_fitted()
if not isinstance(print_level, numbers.Integral) or print_level < 0:
raise ValueError("print_level must be an integer >= 0; got {}."
.format(print_level))
binning_type = self.__class__.__name__.lower()
if self._optimizer is not None:
solver = self._optimizer
time_solver = self._time_solver
else:
solver = None
time_solver = 0
dict_user_options = self.get_params()
print_binning_information(binning_type, print_level, self.name,
self._status, self.solver, solver,
self._time_total, self._time_preprocessing,
self._time_prebinning, time_solver,
self._time_optimizer,
self._time_postprocessing, self._n_prebins,
self._n_refinements, dict_user_options)
def _fit(self, x, y, sample_weight, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
y_others, categories, cat_others, sw_clean, sw_missing,
sw_special, sw_others] = split_data(
self.dtype, x, y, self.special_codes, self.cat_cutoff,
self.user_splits, check_input, self.outlier_detector,
self.outlier_params, None, None, self.class_weight, sample_weight)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples-(n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: "
"{}".format(n_outlier))
if self.dtype == "categorical":
n_categories = len(categories)
n_categories_others = len(cat_others)
n_others = len(y_others)
logger.info("Pre-processing: number of others samples: "
"{}".format(n_others))
logger.info("Pre-processing: number of categories: {}"
.format(n_categories))
logger.info("Pre-processing: number of categories "
"others: {}".format(n_categories_others))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
n_splits = len(self.user_splits)
if self.verbose:
logger.info("Pre-binning: user splits supplied: {}"
.format(n_splits))
if not n_splits:
splits = self.user_splits
n_nonevent = np.array([])
n_event = np.array([])
else:
if self.dtype == "numerical":
user_splits = check_array(
self.user_splits, ensure_2d=False, dtype=None,
force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
else:
[categories, user_splits, x_clean, y_clean, y_others,
cat_others, sw_clean, sw_others, sorted_idx,
] = preprocessing_user_splits_categorical(
self.user_splits, x_clean, y_clean, sw_clean)
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
splits, n_nonevent, n_event = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, x_special,
y_special, y_others, sw_clean, sw_missing, sw_special,
sw_others)
else:
splits, n_nonevent, n_event = self._fit_prebinning(
x_clean, y_clean, y_missing, x_special, y_special, y_others,
self.class_weight, sw_clean, sw_missing, sw_special, sw_others)
self._n_prebins = len(n_nonevent)
self._categories = categories
self._cat_others = cat_others
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning: number of refinements: {}"
.format(self._n_refinements))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_nonevent, n_event)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
t_info = target_info_samples(y_clean, sw_clean)
n_nonevent = np.array([t_info[0]])
n_event = np.array([t_info[1]])
self._n_nonevent, self._n_event = bin_info(
self._solution, n_nonevent, n_event, self._n_nonevent_missing,
self._n_event_missing, self._n_nonevent_special,
self._n_event_special, self._n_nonevent_cat_others,
self._n_event_cat_others, cat_others)
if self.dtype == "numerical":
min_x = x_clean.min()
max_x = x_clean.max()
else:
min_x = None
max_x = None
self._binning_table = BinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_nonevent, self._n_event, min_x, max_x, self._categories,
self._cat_others, self.user_splits)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
def _fit_prebinning(self, x, y, y_missing, x_special, y_special, y_others,
class_weight=None, sw_clean=None, sw_missing=None,
sw_special=None, sw_others=None):
min_bin_size = int(np.ceil(self.min_prebin_size * self._n_samples))
prebinning = PreBinning(method=self.prebinning_method,
n_bins=self.max_n_prebins,
min_bin_size=min_bin_size,
problem_type=self._problem_type,
class_weight=class_weight,
**self.prebinning_kwargs
).fit(x, y, sw_clean)
return self._prebinning_refinement(prebinning.splits, x, y, y_missing,
x_special, y_special, y_others,
sw_clean, sw_missing, sw_special,
sw_others)
def _fit_optimizer(self, splits, n_nonevent, n_event):
if self.verbose:
logger.info("Optimizer started.")
time_init = time.perf_counter()
if len(n_nonevent) <= 1:
self._status = "OPTIMAL"
self._splits_optimal = splits
self._solution = np.zeros(len(splits), dtype=bool)
if self.verbose:
logger.warning("Optimizer: {} bins after pre-binning."
.format(len(n_nonevent)))
logger.warning("Optimizer: solver not run.")
logger.info("Optimizer terminated. Time: 0s")
return
# Min/max number of bins
if self.min_bin_size is not None:
min_bin_size = int(np.ceil(self.min_bin_size * self._n_samples))
else:
min_bin_size = self.min_bin_size
if self.max_bin_size is not None:
max_bin_size = int(np.ceil(self.max_bin_size * self._n_samples))
else:
max_bin_size = self.max_bin_size
# Min number of event and nonevent per bin
if (self.divergence in ("hellinger", "triangular") and
self._flag_min_n_event_nonevent):
if self.min_bin_n_nonevent is None:
min_bin_n_nonevent = 1
else:
min_bin_n_nonevent = max(self.min_bin_n_nonevent, 1)
if self.min_bin_n_event is None:
min_bin_n_event = 1
else:
min_bin_n_event = max(self.min_bin_n_event, 1)
else:
min_bin_n_nonevent = self.min_bin_n_nonevent
min_bin_n_event = self.min_bin_n_event
# Monotonic trend
trend_change = None
if self.dtype == "numerical":
auto_monotonic_modes = ("auto", "auto_heuristic", "auto_asc_desc")
if self.monotonic_trend in auto_monotonic_modes:
monotonic = auto_monotonic(n_nonevent, n_event,
self.monotonic_trend)
if self.monotonic_trend == "auto_heuristic":
if monotonic in ("peak", "valley"):
if monotonic == "peak":
monotonic = "peak_heuristic"
else:
monotonic = "valley_heuristic"
event_rate = n_event / (n_nonevent + n_event)
trend_change = peak_valley_trend_change_heuristic(
event_rate, monotonic)
if self.verbose:
logger.info("Optimizer: classifier predicts {} "
"monotonic trend.".format(monotonic))
else:
monotonic = self.monotonic_trend
if monotonic in ("peak_heuristic", "valley_heuristic"):
event_rate = n_event / (n_nonevent + n_event)
trend_change = peak_valley_trend_change_heuristic(
event_rate, monotonic)
if self.verbose:
logger.info("Optimizer: trend change position {}."
.format(trend_change))
else:
monotonic = self.monotonic_trend
if monotonic is not None:
monotonic = "ascending"
if self.verbose:
if monotonic is None:
logger.info("Optimizer: monotonic trend not set.")
else:
logger.info("Optimizer: monotonic trend set to {}."
.format(monotonic))
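# Illustrative sketch of the heuristic above (values made up): for bin
# event rates [0.1, 0.3, 0.5, 0.4, 0.2] and monotonic="peak_heuristic",
# peak_valley_trend_change_heuristic returns a change position near the
# maximum event rate (index 2); the solver then enforces ascending
# rates before that position and descending rates after it.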
if self.solver == "cp":
optimizer = BinningCP(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy, self.gamma,
self.user_splits_fixed, self.time_limit)
elif self.solver == "mip":
optimizer = BinningMIP(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy, self.gamma,
self.user_splits_fixed, self.mip_solver,
self.time_limit)
elif self.solver == "ls":
optimizer = BinningLS(monotonic, self.min_n_bins, self.max_n_bins,
min_bin_size, max_bin_size,
min_bin_n_event, self.max_bin_n_event,
min_bin_n_nonevent, self.max_bin_n_nonevent,
self.min_event_rate_diff, self.max_pvalue,
self.max_pvalue_policy,
self.user_splits_fixed, self.time_limit)
if self.verbose:
logger.info("Optimizer: build model...")
optimizer.build_model(self.divergence, n_nonevent, n_event,
trend_change)
if self.verbose:
logger.info("Optimizer: solve...")
status, solution = optimizer.solve()
self._solution = solution
self._optimizer, self._time_optimizer = solver_statistics(
self.solver, optimizer.solver_)
self._status = status
if self.dtype == "categorical" and self.user_splits is not None:
self._splits_optimal = splits[solution]
else:
self._splits_optimal = splits[solution[:-1]]
self._time_solver = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimizer terminated. Time: {:.4f}s"
.format(self._time_solver))
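# Note: the three backends above encode the same constraint set;
# "cp" maps to OR-Tools CP-SAT, "mip" to an OR-Tools MIP backend
# selected via mip_solver ("bop" or "cbc"), and "ls" to the
# LocalSolver engine.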
def _prebinning_refinement(self, splits_prebinning, x, y, y_missing,
x_special, y_special, y_others, sw_clean,
sw_missing, sw_special, sw_others):
y0 = (y == 0)
y1 = ~y0
# Compute n_nonevent and n_event for special, missing and others.
self._n_nonevent_special, self._n_event_special = target_info_special(
self.special_codes, x_special, y_special, sw_special)
self._n_nonevent_missing, self._n_event_missing = target_info_samples(
y_missing, sw_missing)
if len(y_others):
(self._n_nonevent_cat_others,
self._n_event_cat_others) = target_info_samples(
y_others, sw_others)
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.split_digits is not None:
splits_prebinning = np.round(splits_prebinning, self.split_digits)
splits_prebinning, n_nonevent, n_event = self._compute_prebins(
splits_prebinning, x, y0, y1, sw_clean)
return splits_prebinning, n_nonevent, n_event
def _compute_prebins(self, splits_prebinning, x, y0, y1, sw):
n_splits = len(splits_prebinning)
if not n_splits:
return splits_prebinning, np.array([]), np.array([])
if self.dtype == "categorical" and self.user_splits is not None:
indices = np.digitize(x, splits_prebinning, right=True)
n_bins = n_splits
else:
indices = np.digitize(x, splits_prebinning, right=False)
n_bins = n_splits + 1
n_nonevent = np.empty(n_bins, dtype=np.int64)
n_event = np.empty(n_bins, dtype=np.int64)
for i in range(n_bins):
mask = (indices == i)
n_nonevent[i] = np.sum(sw[y0 & mask])
n_event[i] = np.sum(sw[y1 & mask])
mask_remove = (n_nonevent == 0) | (n_event == 0)
if np.any(mask_remove):
if self.divergence in ("hellinger", "triangular"):
self._flag_min_n_event_nonevent = True
else:
self._n_refinements += 1
if (self.dtype == "categorical" and
self.user_splits is not None):
mask_splits = mask_remove
else:
mask_splits = np.concatenate([
mask_remove[:-2], [mask_remove[-2] | mask_remove[-1]]])
if self.user_splits_fixed is not None:
user_splits_fixed = np.asarray(self.user_splits_fixed)
user_splits = np.asarray(self.user_splits)
fixed_remove = user_splits_fixed & mask_splits
if any(fixed_remove):
raise ValueError(
"Fixed user_splits {} are removed "
"because produce pure prebins. Provide "
"different splits to be fixed."
.format(user_splits[fixed_remove]))
# Update boolean array of fixed user splits.
self.user_splits_fixed = user_splits_fixed[~mask_splits]
self.user_splits = user_splits[~mask_splits]
splits = splits_prebinning[~mask_splits]
if self.verbose:
logger.info("Pre-binning: number prebins removed: {}"
.format(np.count_nonzero(mask_remove)))
[splits_prebinning, n_nonevent,
n_event] = self._compute_prebins(splits, x, y0, y1, sw)
return splits_prebinning, n_nonevent, n_event
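# Rough illustration of the refinement above (counts made up): with
# n_nonevent=[50, 0, 30] and n_event=[10, 5, 20], bin 1 is pure (no
# nonevents). For hellinger/triangular divergences the bin is kept and
# a minimum of one event and one nonevent per bin is enforced later;
# otherwise the offending split is merged away and _compute_prebins
# recurses on the reduced split set.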
@property
def binning_table(self):
"""Return an instantiated binning table. Please refer to
:ref:`Binning table: binary target`.
Returns
-------
binning_table : BinningTable
"""
self._check_is_fitted()
return self._binning_table
@property
def splits(self):
"""List of optimal split points when ``dtype`` is set to "numerical" or
list of optimal bins when ``dtype`` is set to "categorical".
Returns
-------
splits : numpy.ndarray
"""
self._check_is_fitted()
if self.dtype == "numerical":
return self._splits_optimal
else:
return bin_categorical(self._splits_optimal, self._categories,
self._cat_others, self.user_splits)
@property
def status(self):
"""The status of the underlying optimization solver.
Returns
-------
status : str
"""
self._check_is_fitted()
return self._status
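# Usage sketch for the three properties above (assumes a fitted
# instance `optb`):
#   optb.binning_table.build()  # summary table as a pandas.DataFrame
#   optb.splits                 # optimal split points / category bins
#   optb.status                 # e.g. "OPTIMAL"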
def to_json(self, path):
"""
Save the optimal bins and/or split points and the transformation,
depending on the target type.
Parameters
----------
path : str
    The path where the JSON file will be saved.
"""
if path is None:
raise ValueError("Specify the path for the JSON file.")
table = self.binning_table
opt_bin_dict = dict()
opt_bin_dict['name'] = table.name
opt_bin_dict['dtype'] = table.dtype
opt_bin_dict['special_codes'] = table.special_codes
if table.dtype == 'numerical':
opt_bin_dict['splits'] = table.splits.tolist()
elif table.dtype == 'categorical':
opt_bin_dict['splits'] = [split.tolist() for split in table.splits]
opt_bin_dict['n_nonevent'] = table.n_nonevent.tolist()
opt_bin_dict['n_event'] = table.n_event.tolist()
opt_bin_dict['min_x'] = table.min_x
opt_bin_dict['max_x'] = table.max_x
opt_bin_dict['categories'] = table.categories
opt_bin_dict['cat_others'] = table.cat_others
opt_bin_dict['user_splits'] = table.user_splits
with open(path, "w") as write_file:
json.dump(opt_bin_dict, write_file)
def read_json(self, path):
"""
Read a JSON file containing split points and set them as the new
split points.
Parameters
----------
path : str
    The path of the JSON file.
"""
self._is_fitted = True
with open(path, "r") as read_file:
bin_table_attr = json.load(read_file)
for key in bin_table_attr.keys():
if isinstance(bin_table_attr[key], list):
bin_table_attr[key] = np.array(bin_table_attr[key])
self._binning_table = BinningTable(**bin_table_attr)
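# Round-trip sketch (hypothetical path):
#   optb.to_json("optb.json")
#   optb_new = OptimalBinning()
#   optb_new.read_json("optb.json")
#   optb_new.binning_table.build()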
| (name='', dtype='numerical', prebinning_method='cart', solver='cp', divergence='iv', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None, max_bin_n_nonevent=None, min_bin_n_event=None, max_bin_n_event=None, monotonic_trend='auto', min_event_rate_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', gamma=0, outlier_detector=None, outlier_params=None, class_weight=None, cat_cutoff=None, cat_unknown=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, mip_solver='bop', time_limit=100, verbose=False, **prebinning_kwargs) |
4,743 | optbinning.binning.binning | __init__ | null | def __init__(self, name="", dtype="numerical", prebinning_method="cart",
solver="cp", divergence="iv", max_n_prebins=20,
min_prebin_size=0.05, min_n_bins=None, max_n_bins=None,
min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None,
max_bin_n_nonevent=None, min_bin_n_event=None,
max_bin_n_event=None, monotonic_trend="auto",
min_event_rate_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", gamma=0,
outlier_detector=None, outlier_params=None, class_weight=None,
cat_cutoff=None, cat_unknown=None, user_splits=None,
user_splits_fixed=None, special_codes=None, split_digits=None,
mip_solver="bop", time_limit=100, verbose=False,
**prebinning_kwargs):
self.name = name
self.dtype = dtype
self.prebinning_method = prebinning_method
self.solver = solver
self.divergence = divergence
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.min_bin_n_event = min_bin_n_event
self.max_bin_n_event = max_bin_n_event
self.min_bin_n_nonevent = min_bin_n_nonevent
self.max_bin_n_nonevent = max_bin_n_nonevent
self.monotonic_trend = monotonic_trend
self.min_event_rate_diff = min_event_rate_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.gamma = gamma
self.outlier_detector = outlier_detector
self.outlier_params = outlier_params
self.class_weight = class_weight
self.cat_cutoff = cat_cutoff
self.cat_unknown = cat_unknown
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.mip_solver = mip_solver
self.time_limit = time_limit
self.verbose = verbose
self.prebinning_kwargs = prebinning_kwargs
# auxiliary
self._flag_min_n_event_nonevent = False
self._categories = None
self._cat_others = None
self._n_event = None
self._n_nonevent = None
self._n_nonevent_missing = None
self._n_event_missing = None
self._n_nonevent_special = None
self._n_event_special = None
self._n_nonevent_cat_others = None
self._n_event_cat_others = None
self._problem_type = "classification"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._solution = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
| (self, name='', dtype='numerical', prebinning_method='cart', solver='cp', divergence='iv', max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None, max_n_bins=None, min_bin_size=None, max_bin_size=None, min_bin_n_nonevent=None, max_bin_n_nonevent=None, min_bin_n_event=None, max_bin_n_event=None, monotonic_trend='auto', min_event_rate_diff=0, max_pvalue=None, max_pvalue_policy='consecutive', gamma=0, outlier_detector=None, outlier_params=None, class_weight=None, cat_cutoff=None, cat_unknown=None, user_splits=None, user_splits_fixed=None, special_codes=None, split_digits=None, mip_solver='bop', time_limit=100, verbose=False, **prebinning_kwargs) |
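A minimal end-to-end sketch of the constructor above; the dataset,
column name and parameter values are illustrative and assume
scikit-learn is installed:

from sklearn.datasets import load_breast_cancer
import pandas as pd
from optbinning import OptimalBinning

# Load a binary-target dataset and pick one numerical feature.
data = load_breast_cancer()
df = pd.DataFrame(data.data, columns=data.feature_names)
x = df["mean radius"].values
y = data.target

# Instantiate with a few of the parameters documented above and fit.
optb = OptimalBinning(name="mean radius", dtype="numerical", solver="cp",
                      max_n_prebins=20, min_prebin_size=0.05)
optb.fit(x, y)
print(optb.status)                       # expected "OPTIMAL"
x_woe = optb.transform(x, metric="woe")  # Weight-of-Evidence transform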
4,751 | optbinning.binning.binning | _fit | null | def _fit(self, x, y, sample_weight, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
y_others, categories, cat_others, sw_clean, sw_missing,
sw_special, sw_others] = split_data(
self.dtype, x, y, self.special_codes, self.cat_cutoff,
self.user_splits, check_input, self.outlier_detector,
self.outlier_params, None, None, self.class_weight, sample_weight)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples - (n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: "
"{}".format(n_outlier))
if self.dtype == "categorical":
n_categories = len(categories)
n_categories_others = len(cat_others)
n_others = len(y_others)
logger.info("Pre-processing: number of others samples: "
"{}".format(n_others))
logger.info("Pre-processing: number of categories: {}"
.format(n_categories))
logger.info("Pre-processing: number of categories "
"others: {}".format(n_categories_others))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
n_splits = len(self.user_splits)
if self.verbose:
logger.info("Pre-binning: user splits supplied: {}"
.format(n_splits))
if not n_splits:
splits = self.user_splits
n_nonevent = np.array([])
n_event = np.array([])
else:
if self.dtype == "numerical":
user_splits = check_array(
self.user_splits, ensure_2d=False, dtype=None,
force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
user_splits = user_splits[sorted_idx]
else:
[categories, user_splits, x_clean, y_clean, y_others,
cat_others, sw_clean, sw_others, sorted_idx,
] = preprocessing_user_splits_categorical(
self.user_splits, x_clean, y_clean, sw_clean)
if self.user_splits_fixed is not None:
self.user_splits_fixed = np.asarray(
self.user_splits_fixed)[sorted_idx]
splits, n_nonevent, n_event = self._prebinning_refinement(
user_splits, x_clean, y_clean, y_missing, x_special,
y_special, y_others, sw_clean, sw_missing, sw_special,
sw_others)
else:
splits, n_nonevent, n_event = self._fit_prebinning(
x_clean, y_clean, y_missing, x_special, y_special, y_others,
self.class_weight, sw_clean, sw_missing, sw_special, sw_others)
self._n_prebins = len(n_nonevent)
self._categories = categories
self._cat_others = cat_others
self._time_prebinning = time.perf_counter() - time_prebinning
if self.verbose:
logger.info("Pre-binning: number of prebins: {}"
.format(self._n_prebins))
logger.info("Pre-binning: number of refinements: {}"
.format(self._n_refinements))
logger.info("Pre-binning terminated. Time: {:.4f}s"
.format(self._time_prebinning))
# Optimization
self._fit_optimizer(splits, n_nonevent, n_event)
# Post-processing
if self.verbose:
logger.info("Post-processing started.")
logger.info("Post-processing: compute binning information.")
time_postprocessing = time.perf_counter()
if not len(splits):
t_info = target_info_samples(y_clean, sw_clean)
n_nonevent = np.array([t_info[0]])
n_event = np.array([t_info[1]])
self._n_nonevent, self._n_event = bin_info(
self._solution, n_nonevent, n_event, self._n_nonevent_missing,
self._n_event_missing, self._n_nonevent_special,
self._n_event_special, self._n_nonevent_cat_others,
self._n_event_cat_others, cat_others)
if self.dtype == "numerical":
min_x = x_clean.min()
max_x = x_clean.max()
else:
min_x = None
max_x = None
self._binning_table = BinningTable(
self.name, self.dtype, self.special_codes, self._splits_optimal,
self._n_nonevent, self._n_event, min_x, max_x, self._categories,
self._cat_others, self.user_splits)
self._time_postprocessing = time.perf_counter() - time_postprocessing
if self.verbose:
logger.info("Post-processing terminated. Time: {:.4f}s"
.format(self._time_postprocessing))
self._time_total = time.perf_counter() - time_init
if self.verbose:
logger.info("Optimal binning terminated. Status: {}. Time: {:.4f}s"
.format(self._status, self._time_total))
# Completed successfully
self._is_fitted = True
return self
| (self, x, y, sample_weight, check_input) |