import re
from ai4water.backend import np, pd
class Resampler(object):
"""Resamples time-series data from one frequency to another frequency.
"""
min_in_freqs = {
'MIN': 1,
'MINUTE': 1,
'DAILY': 1440,
'D': 1440,
'HOURLY': 60,
'HOUR': 60,
'H': 60,
'MONTHLY': 43200,
'M': 43200,
'YEARLY': 525600
}
def __init__(self, data, freq, how='mean', verbosity=1):
"""
Arguments:
data : data to use
freq : frequency at which to transform/resample
how : string or dictionary mapping to columns in data defining how to resample the data.
verbosity : controls the amount of information to be printed. Default is 1.
"""
data = pd.DataFrame(data)
self.orig_df = data.copy()
self.target_freq = self.freq_in_mins_from_string(freq)
self.how = self.check_how(how)
self.verbosity = verbosity
def __call__(self, *args, **kwargs):
if self.target_freq > self.orig_freq:
# we want to calculate at higher/larger time-step
return self.downsample()
else:
# we want to calculate at smaller time-step
return self.upsample()
@property
def orig_freq(self):
return self.freq_in_mins_from_string(pd.infer_freq(self.orig_df.index))
@property
def allowed_freqs(self):
return self.min_in_freqs.keys()
def check_how(self, how):
if not isinstance(how, str):
assert isinstance(how, dict)
assert len(how) == len(self.orig_df.columns)
else:
assert isinstance(how, str)
how = {col:how for col in self.orig_df.columns}
return how
def downsample(self):
df = pd.DataFrame()
for col in self.orig_df:
_df = downsample_df(self.orig_df[col], how=self.how[col], target_freq=self.target_freq)
df = pd.concat([df, _df], axis=1)
return df
def upsample(self, drop_nan=True):
df = pd.DataFrame()
for col in self.orig_df:
_df = upsample_df(self.orig_df[col], how=self.how[col], target_freq=self.target_freq)
df = pd.concat([df, _df], axis=1)
# concatenation of dataframes where one column was upsampled with 'linear' and the other with 'same' can result
# in different lengths, and concatenation will then add NaNs to the shorter column.
if drop_nan:
df = df.dropna()
return df
def str_to_mins(self, input_string: str) -> int:
return self.min_in_freqs[input_string]
def freq_in_mins_from_string(self, input_string: str) -> int:
if has_numbers(input_string):
in_minutes = split_freq(input_string)
elif input_string.upper() in ['D', 'H', 'M', 'DAILY', 'HOURLY', 'MONTHLY', 'YEARLY', 'MIN', 'MINUTE']:
in_minutes = self.str_to_mins(input_string.upper())
else:
raise TypeError("invalid input string", input_string)
return int(in_minutes)
def downsample_df(df, how, target_freq):
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
assert how in ['mean', 'sum']
# from low timestep to high timestep i.e from 1 hour to 24 hour
# For quantities like temperature, relative humidity, Q, wind speed
if how == 'mean':
return df.resample(f'{target_freq}min').mean()
# For quantities like 'rain', 'solar radiation', 'evapotranspiration'
elif how == 'sum':
return df.resample(f'{target_freq}min').sum()
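# A hedged usage sketch for downsample_df (the column names here are hypothetical);
# 'mean' suits state variables such as temperature while 'sum' suits accumulated
# quantities such as rainfall:
#
#   hourly = pd.DataFrame({"temp": np.random.random(48), "rain": np.random.random(48)},
#                         index=pd.date_range("20110101", periods=48, freq="H"))
#   daily_temp = downsample_df(hourly["temp"], how="mean", target_freq=1440)
#   daily_rain = downsample_df(hourly["rain"], how="sum", target_freq=1440)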
def upsample_df(df, how:str, target_freq:int):
"""drop_nan: if how='linear', we may """
# from larger timestep to smaller timestep, such as from daily to hourly
out_freq = str(target_freq) + 'min'
if isinstance(df, pd.Series):
df = pd.DataFrame(df)
col_name = df.columns[0]
nan_idx = df.isna() # preserving indices with nan values
assert df.shape[1] <=1
nan_idx_r = nan_idx.resample(out_freq).ffill()
nan_idx_r = nan_idx_r.fillna(False)  # the first value was being filled with NaN (reason unclear), so set it to False
data_frame = df.copy()
# For quantities like temperature, relative humidity, Q, wind speed, we would like to do an interpolation
if how == 'linear':
data_frame = data_frame.resample(out_freq).interpolate(method='linear')
# filling those interpolated values with NaNs which were NaN before interpolation
data_frame[nan_idx_r] = np.nan
# For quantities like 'rain', 'solar radiation', 'evapotranspiration', we would like to distribute them equally
# at smaller time-steps.
elif how == 'same':
# distribute rainfall equally to smaller time steps. like hourly 17.4 will be 1.74 at 6 min resolution
idx = data_frame.index[-1] + get_offset(data_frame.index.freqstr)
data_frame = data_frame.append(data_frame.iloc[[-1]].rename({data_frame.index[-1]: idx}))
data_frame = add_freq(data_frame)
df1 = data_frame.resample(out_freq).ffill().iloc[:-1]
df1[col_name ] /= df1.resample(data_frame.index.freqstr)[col_name ].transform('size')
data_frame = df1.copy()
# filling those interpolated values with NaNs which were NaN before interpolation
data_frame[nan_idx_r] = np.nan
else:
raise ValueError(f"unoknown method to transform '{how}'")
return data_frame
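# A hedged usage sketch for upsample_df, assuming hourly rainfall that is to be
# distributed equally over 6-minute steps ('same'); each hourly value is split
# over ten 6-minute steps, e.g. 1.74 becomes 0.174 per step:
#
#   rain = pd.DataFrame({"rain": [1.74, 0.6]},
#                       index=pd.date_range("20110101", periods=2, freq="H"))
#   rain_6min = upsample_df(rain["rain"], how="same", target_freq=6)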
def add_freq(df, assert_feq=False, freq=None, method=None):
idx = df.index.copy()
if idx.freq is None:
_freq = pd.infer_freq(idx)
idx.freq = _freq
if idx.freq is None:
if assert_feq:
df = force_freq(df, freq, method=method)
else:
raise AttributeError('no discernible frequency found. Specify'
' a frequency string with `freq`.')
else:
df.index = idx
return df
def force_freq(data_frame, freq_to_force, method=None):
old_nan_counts = data_frame.isna().sum()
old_shape = data_frame.shape
dr = pd.date_range(data_frame.index[0], data_frame.index[-1], freq=freq_to_force)
df_unique = data_frame[~data_frame.index.duplicated(keep='first')] # first remove duplicate indices if present
if method:
df_idx_sorted = df_unique.sort_index()
df_reindexed = df_idx_sorted.reindex(dr, method='nearest')
else:
df_reindexed = df_unique.reindex(dr, fill_value=np.nan)
df_reindexed.index.freq = pd.infer_freq(df_reindexed.index)
new_nan_counts = df_reindexed.isna().sum()
print('Frequency {} is forced to dataframe, NaN counts changed from {} to {}, shape changed from {} to {}'
.format(df_reindexed.index.freq, old_nan_counts.values, new_nan_counts.values,
old_shape, df_reindexed.shape))
return df_reindexed
def split_freq(freq_str: str) -> int:
match = re.match(r"([0-9]+)([a-z]+)", freq_str, re.I)
if match:
minutes, freq = match.groups()
if freq.upper() in ['H', 'HOURLY', 'HOURS', 'HOUR']:
minutes = int(minutes) * 60
elif freq.upper() in ['D', 'DAILY', 'DAY', 'DAYS']:
minutes = int(minutes) * 1440
return int(minutes)
else:
raise NotImplementedError
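# Hedged examples of the minutes split_freq is expected to return:
#   split_freq("30min") -> 30
#   split_freq("6H")    -> 360
#   split_freq("2D")    -> 2880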
TIME_STEP = {'D': 'Day', 'H': 'Hour', 'M': 'MonthEnd'}
def get_offset(freqstr: str) -> pd.offsets.DateOffset:
offset_step = 1
if freqstr in TIME_STEP:
freqstr = TIME_STEP[freqstr]
elif has_numbers(freqstr):
in_minutes = split_freq(freqstr)
freqstr = 'Minute'
offset_step = int(in_minutes)
offset = getattr(pd.offsets, freqstr)(offset_step)
return offset
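# Hedged examples of the pandas offsets get_offset is expected to return:
#   get_offset("H")     -> pd.offsets.Hour(1)
#   get_offset("D")     -> pd.offsets.Day(1)
#   get_offset("30min") -> pd.offsets.Minute(30)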
def has_numbers(input_string: str) -> bool:
return bool(re.search(r'\d', input_string))

# end of ai4water/preprocessing/resample.py
import scipy
from scipy.stats import norm, rankdata
from ai4water.backend import np
class Features(object):
def __init__(self, x):
self.x = x
class Trends(Features):
"""
Arguments:
x array/list/series: 1d array or array like whose features are to be calculated.
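Example
-------
A minimal usage sketch:

>>> import numpy as np
>>> x = np.random.rand(100) + np.linspace(0, 0.5, 100)
>>> trends = Trends(x)
>>> slope = trends.sen_slope()
>>> p_value = trends.mann_kendall()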
"""
def sen_slope(self, alpha=None):
# https://github.com/USGS-python/trend/blob/master/trend/__init__.py
"""A nonparametric estimate of trend.
Parameters
----------
x : array_like
Observations taken at a fixed frequency.
alpha : float, optional
significance level; if given, the lower and upper confidence limits of the slope are also returned.
Notes
-----
This method works with missing or censored data, as long as less than 20% of
observations are censored.
References
----------
.. [1] Helsel and Hirsch, R.M. 2002. Statistical Methods in Water Resources.
.. [2] https://vsp.pnnl.gov/help/vsample/nonparametric_estimate_of_trend.htm
"""
s = sen_diff(self.x)
s.sort()
if alpha:
N = len(s)
# calculate confidence limits
C_alpha = norm.ppf(1 - alpha / 2) * np.sqrt(np.nanvar(self.x))
U = int(np.round(1 + (N + C_alpha) / 2))
L = int(np.round((N - C_alpha) / 2))
return np.nanmedian(s), s[L], s[U]
else:
return np.nanmedian(s)
def seasonal_sen_slope(self, period=12, alpha=None):
"""A nonparametric estimate of trend for seasonal time series.
Parameters
----------
x : array_like
Observations taken at a fixed frequency.
period : int
Number of observations in a cycle. The number of seasons.
"""
s = np.array([])  # slope estimates collected from all seasons
for season in np.arange(0, period):
x_season = self.x[season::period]
s = np.append(s, sen_diff(x_season))
s.sort()
if alpha:
# XXX This code needs to be verified
N = len(s)
# calculate confidence limits
C_alpha = norm.ppf(1-alpha/2)*np.sqrt(np.nanvar(self.x))
U = int(np.round(1 + (N + C_alpha)/2))
L = int(np.round((N - C_alpha)/2))
return np.nanmedian(s), s[L], s[U]
else:
return np.nanmedian(s)
def pettitt(self, alpha=0.05):
"""Pettitt's change-point test
A nonparametric test for detecting change points in a time series.
Parameters
----------
x : array_like
Observations taken at a fixed frequency.
alpha : float
Significance level
Returns
-------
The index of the change point of the series, provided that it is
statistically significant.
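Example
-------
A sketch with an artificial shift in the mean halfway through the series:

>>> x = np.concatenate([np.random.rand(50), np.random.rand(50) + 2.0])
>>> change_point = Trends(x).pettitt()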
"""
U_t = np.zeros_like(self.x)
n = len(self.x)
r = rankdata(self.x)
for i in np.arange(n):
U_t[i] = 2 * np.sum(r[:i+1]) - (i+1)*(n+1)
t = np.argmax(np.abs(U_t))
K_t = U_t[t]
p = 2.0 * np.exp((-6.0 * K_t**2)/(n**3 + n**2))
if p < alpha:
return t
else:
return np.nan
def mann_kendall(self, alpha=0.05):
"""Mann-Kendall (MK) is a nonparametric test for monotonic trend.
Parameters
----------
x : array
Observations taken at a fixed frequency.
Returns
-------
p : float
Two-tailed p-value of the MK test.
Examples
--------
>>> x = np.random.rand(100) + np.linspace(0,.5,100)
>>> p = Trends(x).mann_kendall()
Attribution
-----------
Modified from code by Michael Schramn available at
https://github.com/mps9506/Mann-Kendall-Trend/blob/master/mk_test.py
"""
# n = len(self.x)
s = mk_score(self.x)
var_s = mk_score_variance(self.x)
z = mk_z(s, var_s)
# calculate the p_value
p_value = 2*(1-norm.cdf(abs(z))) # two tail test
return p_value
def seasonal_mann_kendall(self, period=12):
""" Seasonal nonparametric test for detecting a monotonic trend.
Parameters
----------
x : array
A sequence of chronologically ordered observations with fixed
frequency.
period : int
The number of observations that define period. This is the number of seasons.
"""
# Compute the SK statistic, S, for each season
s = 0
var_s = 0
for season in np.arange(period):
x_season = self.x[season::period]
s += mk_score(x_season)
var_s += mk_score_variance(x_season)
# Compute the SK test statistic, Z, for each season.
z = mk_z(s, var_s)
# calculate the p_value
p_value = 2*(1-norm.cdf(abs(z))) # two tail test
return p_value
def mk_z(s, var_s):
"""Computes the MK test statistic, Z.
Parameters
----------
s : float
The MK trend statistic, S.
var_s : float
Variance of S.
Returns
-------
MK test statistic, Z.
"""
# calculate the MK test statistic
if s > 0:
z = (s - 1)/np.sqrt(var_s)
elif s < 0:
z = (s + 1)/np.sqrt(var_s)
else:
z = 0
return z
def mk_score_variance(x):
"""Computes corrected variance of S statistic used in Mann-Kendall tests.
Equation 8.4 from Helsel and Hirsch (2002).
Parameters
----------
x : array_like
Returns
-------
Variance of S statistic
Note that this might be equivalent to the formulation on page 728 of Hirsch and Slack.
References
----------
.. [1] Helsel and Hirsch, R.M. 2002. Statistical Methods in Water Resources.
"""
x = x[~np.isnan(x)]
n = len(x)
# calculate the unique data
unique_x = np.unique(x)
# calculate the number of tied groups
g = len(unique_x)
# calculate the var(s)
if n == g: # there is no tie
var_s = (n * (n - 1) * (2 * n + 5)) / 18
else: # there are some ties in data
tp = np.zeros_like(unique_x)
for i in range(len(unique_x)):
tp[i] = sum(x == unique_x[i])
var_s = (n * (n - 1) * (2 * n + 5) - np.sum(tp * (tp - 1) * (2 * tp + 5))) / 18
return var_s
class Stats(Features):
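"""Simple statistical features of a 1d sequence.
A minimal usage sketch:

>>> import numpy as np
>>> stats = Stats(np.random.random(50))
>>> rms = stats.rms()
>>> sad = stats.sum_abs_diff()
>>> mmd = stats.min_max_diff()
"""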
def auc(self):
return
def auto_corr(self):
return
def centroid(self):
return
def slope(self):
return
def zero_crossing_rate(self):
return
def sum_abs_diff(self):
return np.sum(np.abs(np.diff(self.x)))
def min_max_diff(self):
return np.abs(np.max(self.x) - np.min(self.x))
def mean_abs_dev(self):
return np.mean(np.abs(self.x - np.mean(self.x, axis=0)), axis=0)
def median_abs_dev(self):
"""Median absolute deviation"""
return scipy.stats.median_abs_deviation(self.x, scale=1)
def rms(self):
"""Root mean square"""
return np.sqrt(np.sum(np.array(self.x) ** 2) / len(self.x))
def mk_score(x):
"""Computes S statistic used in Mann-Kendall tests.
Parameters
----------
x : array_like
Chronologically ordered array of observations.
Returns
-------
MK trend statistic (S).
"""
x = x[~np.isnan(x)]
n = len(x)
s = 0
for j in np.arange(1, n):
s += np.sum(np.sign(x[j] - x[0:j]))
return s
def sen_diff(x):
"""Sen's difference operator.
Parameters
----------
x : array_like
Observations taken at a fixed frequency.
Returns
-------
Sen difference
"""
#x = x[~np.isnan(x)]
n = len(x)
N = int(n*(n-1)/2) # number of slope estimates
s = np.zeros(N)
i = 0
for j in np.arange(1, n):
#s[i:j+i] = (x[j] - x[0:j])/np.arange(1, j+1)
s[i:j+i] = (x[j] - x[0:j])/np.arange(j, 0, -1)
i += j
return s
if __name__ == "__main__":
f = Features(np.random.random(10))

# end of ai4water/preprocessing/seq_features.py
import json
import inspect
import warnings
from typing import Union
from copy import copy, deepcopy
import ai4water.datasets as datasets
from ai4water.datasets import all_datasets
from ai4water.utils.utils import TrainTestSplit
from ai4water.utils.plotting_tools import Plots
from ai4water.preprocessing.imputation import Imputation
from ai4water.utils.utils import prepare_data, jsonize, to_datetime_index, print_something
from ai4water.backend import np, pd, plt, os, mpl, sklearn, h5py
from .utils import check_for_classification
from .utils import consider_intervals, decode
from .utils import load_data_from_hdf5
train_test_split = sklearn.model_selection.train_test_split
KFold = sklearn.model_selection.KFold
LeaveOneOut = sklearn.model_selection.LeaveOneOut
TimeSeriesSplit = sklearn.model_selection.TimeSeriesSplit
ShuffleSplit = sklearn.model_selection.ShuffleSplit
Patch = mpl.patches.Patch
cmap_cv = plt.cm.coolwarm
class _DataSet(Plots):
def __init__(self, config, path=os.getcwd()):
Plots.__init__(self, config=config, path=path)
def training_data(self):
raise NotImplementedError
def validation_data(self):
raise NotImplementedError
def test_data(self):
raise NotImplementedError
def KFold_splits(self, n_splits=5):
raise NotImplementedError
def LeaveOneOut_splits(self):
raise NotImplementedError
def TimeSeriesSplit_splits(self, n_splits=5):
raise NotImplementedError
@classmethod
def from_h5(cls, h5_file: str):
raise NotImplementedError
def to_disk(self, path: str):
raise NotImplementedError
def return_xy(self, x, y, initial):
if self.mode == "classification" and self.is_binary:
if len(y) == y.size:
y = y.reshape(-1, 1)
if self.verbosity > 0:
print(f"{'*' * 5} {initial} {'*' * 5}")
print_something(x, "input_x")
print_something(y, "target")
return x, y
def return_x_yy(self, x, prev_y, y, initial):
if self.verbosity > 0:
print(f"{'*' * 5} {initial} data {'*' * 5}")
print_something(x, "input_x")
print_something(prev_y, "prev_y")
print_something(y, "target")
return x, prev_y, y
class DataSet(_DataSet):
"""The purpose of DataSet is to convert unprepared/raw data into prepared data.
A prepared data consists of x,y pairs where x is inputs and y is outputs. There
are >1 examples in a DataSet. Both inputs and outputs consists of same number
of examples. An example consists of one input, output pair which can be given
to a supervised machine learning algorithm for training. For tabular data, the
number of examples does not necessarily match number of rows. The number of
examples depend upon multiple factors such as presence of intervals, how
nans are handled and the arguments related to time series data preparation
which are listed in detail in prepare_data function.
DataSet class can accept the raw, unprepared data in a variety of formats such
as .csv, .xlsx, .parquet, .mat, .n5 etc. For details see this. The DataSet
class can save the prepared data into an hdf5 file which can susequently be
used to load the data and save the time.
Methods
------------
- training_data: returns training data
- validation_data: returns validation data
- test_data: returns test data
- from_h5:
- to_disk
- KFold_splits: creates splits using `KFold` of sklearn
- LeaveOneOut_splits: creates splits using `LeaveOneOut` of sklearn
- TimeSeriesSplit_splits: creates splits using `TimeSeriesSplit` of sklearn
- total_exs
"""
def __init__(
self,
data,
input_features: Union[str, list] = None,
output_features: Union[str, list] = None,
dataset_args: dict = None,
ts_args: dict = None,
split_random: bool = False,
train_fraction: float = 0.7,
val_fraction: float = 0.2,
indices: dict = None,
intervals=None,
shuffle: bool = True,
allow_nan_labels: int = 0,
nan_filler: dict = None,
batch_size: int = 32,
drop_remainder: bool = False,
teacher_forcing: bool = False,
allow_input_nans: bool = False,
seed: int = 313,
verbosity: int = 1,
mode: str = None,
category: str = None,
save: bool = False
):
"""
Initializes the DataSet class
Parameters
----------
data :
source from which to make the data. It can be one of the following:
- pandas dataframe: each columns is a feature and each row is an example
- numpy array
- xarray dataset: it can be xarray dataset
- path like: if the path is the path of a file, then this file can
be a csv/xlsx/nc/npz/mat/parquet/feather file. The .nc file
will be read using xarray to load datasets. If the path refers
to a directory, it is supposed that each file in the directory refers to one example.
- ai4water dataset : name of any of dataset name from ai4water.datasets
- name of .h5 file
input_features : Union[list, dict, str, None]
features to use as input. If `data` is pandas dataframe
then this is list of column names from `data` to be used as input.
output_features : Union[list, dict, str, None]
features to use as output. When `data` is dataframe
then it is list of column names from `data` to be used as output.
If `data` is `dict`, then it must be consistent with `data`.
Default is None,which means the last column of data will be
used as output. In case of multi-class classification, the output
column is not supposed to be one-hot-encoded rather in the form
of [0,1,2,0,1,2,1,2,0] for 3 classes. One-hot-encoding is done
inside the model.
dataset_args : dict
additional arguments for AI4Water's [datasets][ai4water.datasets]
ts_args : dict, optional
This argument should only be used if the data is time series data.
It must be a dictionary which is then passed to :py:func:`ai4water.utils.prepare_data`
for data preparation. Possible keys in the dictionary are:
- lookback
- forecast_len
- forecast_step
- input_steps
split_random : bool, optional
whether to split the data into training and test randomly or not.
train_fraction : float
Fraction of the complete data to be used for training
purpose. Must be greater than 0.0.
val_fraction : float
The fraction of the training data to be used for validation.
Set to 0.0 if no validation data is to be used.
indices : dict, optional
A dictionary with two possible keys, 'training', 'validation'.
It determines the indices to be used to select training, validation
and test data. If indices are given for training, then train_fraction
must not be given. If indices are given for validation, then indices
for training must also be given and val_fraction must not be given.
Therefore, the possible keys in indices dictionary are the following
- ``training``
- ``training`` and ``validation``
intervals :
tuple of tuples where each tuple consists of two integers, marking
the start and end of interval. An interval here means indices
from the data. Only rows within those indices will be used when preparing
data/batches for NN. This is handy when our input data
contains chunks of missing values or when we don't want to consider several
rows in input data during data_preparation.
For further usage see `examples/using_intervals`
shuffle : bool
whether to shuffle the samples or not
allow_nan_labels : int
whether to allow examples with nan labels or not.
if it is > 0, and if target values contain Nans, those examples
will not be ignored and will be used as it is.
In such a case a customized training and evaluation
step is performed where the loss is not calculated for predictions
corresponding to nan observations. Thus this option can be useful
when we are predicting more than 1 target and some of the examples
have some of their labels missing. In such a scenario, if we set this
option to >0, we don't need to ignore those samples at all during data
preparation. This option should be set to > 0 only when using tensorflow
for deep learning models. if == 1, then if an example has label [nan, 1]
it will not be removed while the example with label [nan, nan]
will be ignored/removed. If ==2, both examples (mentioned before) will be
considered/will not be removed. This means that for multiple outputs, we can
end up having examples all of whose labels are nans. If there is only one
output, this must be set to 2 in order to use samples with nan labels.
nan_filler : dict
This argument determines the imputation technique used to fill the nans in
the data. The imputation is actually performed by :py:class:`ai4water.preprocessing.Imputation`
class. Therefore this argument determines the interaction with `Imputation` class.
The default value is None, which will raise error if missing/nan values
are encountered in the input data. The user can however specify a
dictionary whose one key must be `method`. The value of 'method'
key can be `fillna` or `interpolate`. For example, to do forward
filling, the user can do as following
>>> {'method': 'fillna', 'imputer_args': {'method': 'ffill'}}
For details about fillna keyword options see fillna_
For `interpolate`, the user can specify the type of interpolation
for example
>>> {'method': 'interpolate', 'imputer_args': {'method': 'spline', 'order': 2}}
will perform spline interpolation with 2nd order.
For other possible options/keyword arguments see interpolate_
The filling or interpolation is done columnwise, however, the user
can specify how to do it for each column by providing the above mentioned
arguments as dictionary or list. The sklearn based imputation methods
can also be used in a similar fashion. For KNN
>>> {'method': 'KNNImputer', 'imputer_args': {'n_neighbors': 3}}
or for iterative imputation
>>> {'method': 'IterativeImputer', 'imputer_args': {'n_nearest_features': 2}}
To pass additional arguments one can make use of `imputer_args`
keyword argument
>>> {'method': 'KNNImputer', 'features': ['b'], 'imputer_args': {'n_neighbors': 4}},
For more on sklearn based imputation methods see this blog_
batch_size : int
size of one batch. Only relevant if `drop_remainder` is True.
drop_remainder : bool
whether to drop the remainder if len(data) % batch_size != 0 or not.
teacher_forcing : bool
whether to return previous output/target/ground
truth or not. This is useful when the user wants to feed output
at t-1 as input at timestep t. For details about this technique
see this article_
allow_input_nans : bool, optional
If False, the examples containing nans in inputs will be removed.
Setting this to True will result in feeding nan containing data
to your algorithm unless nans are filled with `nan_filler`.
seed : int
random seed for reproducibility
verbosity : int
mode : str
either ``regression`` or ``classification``
category : str
save : bool
whether to save the data in an h5 file or not.
Example
-------
>>> import pandas as pd
>>> import numpy as np
>>> from ai4water.preprocessing import DataSet
>>> data_ = pd.DataFrame(np.random.randint(0, 1000, (50, 2)), columns=['input', 'output'])
>>> data_set = DataSet(data=data_, ts_args={'lookback':5})
>>> x,y = data_set.training_data()
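A sketch of a random train/validation/test split; the column names used here
are hypothetical:

>>> data_ = pd.DataFrame(np.random.random((100, 3)), columns=['a', 'b', 'c'])
>>> ds = DataSet(data=data_, input_features=['a', 'b'], output_features=['c'],
...              split_random=True, train_fraction=0.7, val_fraction=0.2)
>>> train_x, train_y = ds.training_data()
>>> val_x, val_y = ds.validation_data()
>>> test_x, test_y = ds.test_data()

The prepared data can also be written to and reloaded from an hdf5 file; a
sketch, assuming ``data.h5`` is written to the current working directory:

>>> ds = DataSet(data=data_, ts_args={'lookback': 5}, save=True)
>>> ds_from_file = DataSet.from_h5("data.h5")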
.. _fillna:
https://pandas.pydata.org/pandas-docs/version/0.22.0/generated/pandas.DataFrame.fillna.html
.. _article:
https://machinelearningmastery.com/teacher-forcing-for-recurrent-neural-networks/
.. _interpolate:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.interpolate.html
.. _blog:
https://scikit-learn.org/stable/auto_examples/impute/plot_missing_values.html#sphx-glr-auto-examples-impute-plot-missing-values-py
Note
----
The word 'index' is not allowed as column name, input_features or output_features
"""
indices = indices or {}
if indices:
assert split_random is False, "indices cannot be used with split_random"
if 'training' in indices:
assert train_fraction == 0.7, f"""
You can not set training data using both indices and train_fraction.
Use either indices or train_fraction."""
if 'validation' in indices:
assert val_fraction == 0.2, f"""
You can not set validation data using both indices and val_fraction.
Use either indices or val_fraction."""
assert 'training' in indices, f"""
when defining validation data using indices, training data must also be
defined using indices."""
assert val_fraction < 1.0, f"""
val_fraction must be less than 1.0 but it is {val_fraction}.
"""
self.dataset_args = dataset_args
self.config = {
'input_features': input_features,
'output_features': output_features
}
self.nan_filler = nan_filler
self.data = self._process_data(
data,
input_features,
output_features)
self.ts_args = ts_args
self.split_random = split_random
self.indices = indices
self.train_fraction = train_fraction
self.val_fraction = val_fraction
self.shuffle = shuffle
self.batch_size = batch_size
self.intervals = intervals
self.allow_nan_labels = allow_nan_labels
self.teacher_forcing = teacher_forcing
self.drop_remainder = drop_remainder
self.allow_input_nans = allow_input_nans
self.verbosity = verbosity
self.seed = seed
self.mode = mode
self.category = category
self.save = save
self.scalers = {}
self.indexes = {}
self.index_types = {}
self._input_features = copy(input_features)
if save and h5py:
self.to_disk()
_DataSet.__init__(self, config=self.config, path=os.getcwd())
def init_paras(self) -> dict:
"""Returns the initializing parameters of this class"""
signature = inspect.signature(self.__init__)
init_paras = {}
for para in signature.parameters.values():
init_paras[para.name] = getattr(self, para.name)
return init_paras
@property
def ts_args(self):
return self._ts_args
@ts_args.setter
def ts_args(self, _ts_args: dict = None):
default_args = {'input_steps': 1,
'lookback': 1,
'forecast_len': 1,
'forecast_step': 0,
'known_future_inputs': False
}
if _ts_args:
default_args.update(_ts_args)
self._ts_args = default_args
@property
def lookback(self):
return self.ts_args['lookback']
@property
def classes(self):
_classes = []
if self.mode == 'classification':
if self.num_outs == 1: # for binary/multiclass
array = self.data[self._output_features].values
_classes = np.unique(array[~np.isnan(array)])
else: # for one-hot encoded
_classes = self._output_features
return _classes
@property
def num_classes(self):
return len(self.classes)
@property
def is_binary(self) -> bool:
"""Returns True if the porblem is binary classification"""
_default = False
if self.mode == 'classification':
if self.num_outs == 1:
array = self.data[self._output_features].values
unique_vals = np.unique(array[~np.isnan(array)])
if len(unique_vals) == 2:
_default = True
else:
pass # todo, check when output columns are one-hot encoded
return _default
@property
def is_multiclass(self) -> bool:
"""Returns True if the porblem is multiclass classification"""
_default = False
if self.mode == 'classification':
if self.num_outs == 1:
array = self.data[self._output_features].values
unique_vals = np.unique(array[~np.isnan(array)])
if len(unique_vals) > 2:
_default = True
else:
pass # todo, check when output columns are one-hot encoded
return _default
@property
def is_multilabel(self) -> bool:
"""Returns True if the porblem is multilabel classification"""
_default = False
if self.mode == 'classification':
if self.num_outs > 1:
_default = True
return _default
@property
def _to_categorical(self):
# whether we have to convert y into one-hot encoded form
_default = False
if self.is_binary or self.is_multiclass:
if self.num_outs == 1:
_default = True
# it seems sklearn can accept one-hot-encoded targets but xgb, lgbm and catboost can't
# but since sklearn can also accept non-one-hot-encoded targets for multiclass
# let's not one-hot-encode for all ML algos
if self.category == 'ML':
_default = False
return _default
@property
def teacher_forcing(self):
return self._teacher_forcing
@teacher_forcing.setter
def teacher_forcing(self, x):
self._teacher_forcing = x
@property
def input_features(self):
_inputs = self.config['input_features']
if _inputs is None and self.data is not None:
assert isinstance(self.data, pd.DataFrame)
_inputs = self.data.columns[0:-1].to_list()
return _inputs
@property
def output_features(self):
"""for external use"""
_outputs = self.config['output_features']
if _outputs is None and self.data is not None:
# assert isinstance(self.data, pd.DataFrame)
if self.data.ndim == 2:
_outputs = [col for col in self.data.columns if col not in self.input_features]
else:
_outputs = [] # todo
return _outputs
@property
def _output_features(self):
"""for internal use"""
_outputs = deepcopy(self.config['output_features'])
if isinstance(self.data, list):
assert isinstance(_outputs, list)
elif isinstance(self.data, dict):
assert isinstance(_outputs, dict), f"""
data is of type dict while output_features are
of type {_outputs.__class__.__name__}"""
for k in self.data.keys():
if k not in _outputs:
_outputs[k] = []
elif _outputs is None and self.data is not None:
assert isinstance(self.data, pd.DataFrame)
_outputs = [col for col in self.data.columns if col not in self.input_features]
return _outputs
@property
def num_ins(self):
return len(self.input_features)
@property
def num_outs(self):
return len(self.output_features)
@property
def batch_dim(self):
default = "3D"
if self.ts_args['lookback'] == 1:
default = "2D"
return default
def _process_data(self,
data,
input_features,
output_features
):
if isinstance(data, str):
_source = self._get_data_from_str(data, input_features, output_features)
if isinstance(_source, str) and _source.endswith('.h5'):
self._from_h5 = True
elif isinstance(data, pd.DataFrame):
_source = self._get_data_from_df(data, input_features, output_features)
elif isinstance(data, np.ndarray):
_source = self._get_data_from_ndarray(data, input_features, output_features)
elif data.__class__.__name__ == "Dataset":
_source = data
elif isinstance(data, list):
raise ValueError(f"""
data is given as a list. For such cases either use DataSetUnion
or DataSetPipeline insteadd of DataSet class""")
elif isinstance(data, dict):
raise ValueError(f"""
data is given as a dictionary. For such cases either use DataSetUnion
or DataSetPipeline insteadd of DataSet class""")
elif data is None:
return data
else:
assert data is not None
raise ValueError(f"""
unrecognizable source of data of type {data.__class__.__name__} given
""")
_source = self.impute(_source)
return _source
def _get_data_from_ndarray(self, data, input_features, output_features):
if data.ndim == 2:
# if output_features is not defined, consider 1 output and name it
# as 'output'
if output_features is None:
output_features = ['output']
self.config['output_features'] = output_features # we should put it in config as well
elif isinstance(output_features, str):
output_features = [output_features]
else:
assert isinstance(output_features, list)
if input_features is None: # define dummy names for input_features
input_features = [f'input_{i}' for i in range(data.shape[1] - len(output_features))]
self.config['input_features'] = input_features
return pd.DataFrame(data, columns=input_features + output_features)
else:
return data
def _get_data_from_df(self, data, input_features, output_features):
if input_features is None and output_features is not None:
if isinstance(output_features, str):
output_features = [output_features]
assert isinstance(output_features, list)
input_features = [col for col in data.columns if col not in output_features]
# since we have inferred the input_features, they should be put
# back into config
self.config['input_features'] = input_features
return data
def _get_data_from_str(self, data, input_features, output_features):
if isinstance(output_features, str):
output_features = [output_features]
# dir path/file path/ ai4water dataset name
if data.endswith('.h5'):
_source = data
if data.endswith('.csv'):
_source = pd.read_csv(data)
if _source.columns[0] in ['index', 'time', 'date']:
_source.index = pd.to_datetime(_source.pop(_source.columns[0]))
elif data.endswith('.xlsx') or data.endswith('.xls'):
_source = pd.read_excel(data)
if _source.columns[0] in ['index', 'time', 'date']:
_source.index = pd.to_datetime(_source.pop(_source.columns[0]))
elif data.endswith('.parquet'):
_source = pd.read_parquet(data)
elif data.endswith('.feather'):
_source = pd.read_feather(data)
if _source.columns[0] in ['index', 'time', 'date']:
_source.index = pd.to_datetime(_source.pop(_source.columns[0]))
# netcdf file
elif data.endswith('.nc'):
import xarray as xr
_source = xr.open_dataset(data)
_source = _source.to_dataframe()
elif data.endswith('npz'):
data = np.load(data)
assert len(data) == 1
d = []
for k, v in data.items():
d.append(v)
data: np.ndarray = d[0]
_source = pd.DataFrame(data, columns=input_features + output_features)
# matlab's mat file
elif data.endswith('.mat'):
import scipy
mat = scipy.io.loadmat(data)
data: np.ndarray = mat['data']
_source = pd.DataFrame(data, columns=input_features + output_features)
elif os.path.isfile(data):
assert os.path.exists(data)
_source = data
elif os.path.isdir(data):
assert len(os.listdir(data)) > 1
# read from directory
raise NotImplementedError
elif data in all_datasets:
_source = self._get_data_from_ai4w_datasets(data)
else:
raise ValueError(f"unregnizable source of data given {data}")
return _source
def _get_data_from_ai4w_datasets(self, data):
Dataset = getattr(datasets, data)
dataset = Dataset()
dataset_args = self.dataset_args
if dataset_args is None:
dataset_args = {}
# if self.config['input_features'] is not None:
dynamic_features = self.input_features + self.output_features
data = dataset.fetch(dynamic_features=dynamic_features,
**dataset_args)
data = data.to_dataframe(['time', 'dynamic_features']).unstack()
data.columns = [a[1] for a in data.columns.to_flat_index()]
return data
def impute(self, data):
"""Imputes the missing values in the data using `Imputation` module"""
if self.nan_filler is not None:
if isinstance(data, pd.DataFrame):
_source = self._impute(data, self.nan_filler)
else:
raise NotImplementedError
else:
_source = data
return _source
def _impute(self, data, impute_config):
if isinstance(impute_config, str):
method, impute_args = impute_config, {}
data = Imputation(data, method=method, **impute_args)()
elif isinstance(impute_config, dict):
data = Imputation(data, **impute_config)()
elif isinstance(impute_config, list):
for imp_conf in impute_config:
data = Imputation(data, **imp_conf)()
else:
raise NotImplementedError(f'{impute_config.__class__.__name__}')
return data
def get_indices(self):
"""If the data is to be divded into train/test based upon indices,
here we create train_indices and test_indices. The train_indices
contain indices for both training and validation data.
"""
tot_obs = self.total_exs(**self.ts_args)
all_indices = np.arange(tot_obs)
if len(self.indices) == 0:
if self.train_fraction < 1.0:
if self.split_random:
train_indices, test_indices = train_test_split(
all_indices,
train_size=self.train_fraction,
random_state=self.seed
)
else:
train_indices, test_indices = self._get_indices_by_seq_split(
all_indices,
self.train_fraction)
else: # no test data
train_indices, test_indices = all_indices, []
else:
_train_indices = self.indices.get('training', None)
_val_indices = self.indices.get('validation', None)
_test_indices = self.indices.get('test', None)
if _train_indices is not None:
if _val_indices is None:
# even if val_fraction is > 0.0, we will separate validation
# data from training later
_val_indices = np.array([]) # no validation set
else:
assert isinstance(np.array(_val_indices), np.ndarray)
_val_indices = np.array(_val_indices)
overlap = np.intersect1d(_train_indices, _val_indices)
assert len(overlap) == 0, f"""
Training and validation indices must be mutually exclusive.
They contain {len(overlap)} overlapping values."""
train_indices = np.sort(np.hstack([_train_indices, _val_indices]))
if _test_indices is None:
# get test_indices by subtracting train_indices from all indices
test_indices = [ind for ind in all_indices if ind not in train_indices]
# _val_indices = np.array([])
else: # todo
train_indices = []
setattr(self, 'train_indices', train_indices)
setattr(self, 'test_indices', test_indices)
return np.array(train_indices).astype("int32"), np.array(test_indices).astype("int32")
def _get_indices_by_seq_split(
self,
all_indices: Union[list, np.ndarray],
train_fraction):
""" sequential train/test split"""
train_indices = all_indices[0:int(train_fraction * len(all_indices))]
test_indices = all_indices[int(train_fraction * len(all_indices)):]
return train_indices, test_indices
def _training_data(self, key="_training", **kwargs):
"""training data including validation data"""
train_indices, test_indices = self.get_indices()
if 'validation' in self.indices:
# when validation indices are given, we first prepare
# complete data which contains training, validation and test data
# TODO this is against the function definition
indices = np.sort(np.hstack([train_indices, test_indices]))
else:
indices = train_indices
data = self.data.copy()
# numpy arrays are not indexified and it is supposed that the whole array is
# used as input
if not isinstance(data, np.ndarray):
data = self.indexify(data, key)
# get x,_y, y
x, prev_y, y = self._make_data(
data,
intervals=self.intervals,
indices=indices,
**kwargs)
if not isinstance(self.data, np.ndarray):
x, self.indexes[key] = self.deindexify(x, key)
if self.mode == 'classification':
y = check_for_classification(y, self._to_categorical)
return x, prev_y, y
def training_data(self, key="train", **kwargs):
"""training data excluding validation data"""
if getattr(self, '_from_h5', False):
return load_data_from_hdf5('training_data', self.data)
x, prev_y, y = self._training_data(key=key, **kwargs)
if self.val_fraction > 0.0:
# when no output is generated, corresponding index will not be saved
idx = self.indexes.get(key, np.arange(len(x))) # index also needs to be split
x, prev_y, y, idx = self._train_val_split(x, prev_y, y, idx, 'training')
# if drop_remainder is True, trim the examples so that len(x) % batch_size == 0
x, prev_y, y = self.check_for_batch_size(x, prev_y, y)
self.indexes[key] = idx[0:len(x)]
if self.teacher_forcing:
return self.return_x_yy(x, prev_y, y, "Training")
return self.return_xy(x, y, "Training")
def validation_data(self, key="val", **kwargs):
"""validation data"""
if getattr(self, '_from_h5', False):
return load_data_from_hdf5('validation_data', self.data)
x, prev_y, y = self._training_data(key=key, **kwargs)
if self.val_fraction > 0.0:
idx = self.indexes.get(key, np.arange(len(x)))
x, prev_y, y, idx = self._train_val_split(x, prev_y, y, idx, 'validation')
x, prev_y, y = self.check_for_batch_size(x, prev_y, y)
self.indexes[key] = idx[0:len(x)]
else:
x, prev_y, y = np.empty(0), np.empty(0), np.empty(0)
if self.teacher_forcing:
return self.return_x_yy(x, prev_y, y, "Validation")
return self.return_xy(x, y, "Validation")
def _train_val_split(self, x, prev_y, y, idx, return_type):
"""split x,y,idx,prev_y into training and validation data"""
if self.split_random:
# split x,y randomly
splitter = TrainTestSplit(test_fraction=self.val_fraction, seed=self.seed)
train_x, val_x, train_y, val_y = splitter.split_by_random(x, y)
splitter = TrainTestSplit(test_fraction=self.val_fraction, seed=self.seed)
train_idx, val_idx, train_prev_y, val_prev_y = splitter.split_by_random(
idx, prev_y)
elif 'validation' in self.indices:
# separate indices were provided for validation data
# it must be remembered that x,y now contains training+validation+test data
# but based upon indices, we will choose either training or validation data
val_indices = self.indices['validation']
_train_indices, _ = self.get_indices()
train_indices = [i for i in _train_indices if i not in val_indices]
splitter = TrainTestSplit(train_indices=train_indices, test_indices=val_indices)
train_x, val_x, train_y, val_y = splitter.split_by_indices(
x, y
)
splitter = TrainTestSplit(train_indices=train_indices, test_indices=val_indices)
train_idx, val_idx, train_prev_y, val_prev_y = splitter.split_by_indices(
idx, prev_y)
else:
# split x,y sequentially
splitter = TrainTestSplit(test_fraction=self.val_fraction)
train_x, val_x, train_y, val_y = splitter.split_by_slicing(x, y)
splitter = TrainTestSplit(test_fraction=self.val_fraction)
train_idx, val_idx, train_prev_y, val_prev_y = splitter.split_by_slicing(idx, prev_y)
if return_type == "training":
return train_x, train_prev_y, train_y, train_idx
return val_x, val_prev_y, val_y, val_idx
def test_data(self, key="test", **kwargs):
"""test data"""
if getattr(self, '_from_h5', False):
return load_data_from_hdf5('test_data', self.data)
if self.train_fraction < 1.0:
data = self.data.copy()
# numpy arrays are not indexified and it is supposed that the whole array
# is used as input
if not isinstance(data, np.ndarray):
data = self.indexify(data, key)
_, test_indices = self.get_indices()
if len(test_indices) > 0: # it is possible that training and validation
# indices cover whole data
# get x,_y, y
x, prev_y, y = self._make_data(
data,
intervals=self.intervals,
indices=test_indices,
**kwargs)
x, prev_y, y = self.check_for_batch_size(x, prev_y, y)
if not isinstance(self.data, np.ndarray):
x, self.indexes[key] = self.deindexify(x, key)
if self.mode == 'classification':
y = check_for_classification(y, self._to_categorical)
else:
x, prev_y, y = np.empty(0), np.empty(0), np.empty(0)
else:
x, prev_y, y = np.empty(0), np.empty(0), np.empty(0)
if self.teacher_forcing:
return self.return_x_yy(x, prev_y, y, "Test")
return self.return_xy(x, y, "Test")
def check_for_batch_size(self, x, prev_y=None, y=None):
if self.drop_remainder:
assert isinstance(x, np.ndarray)
remainder = len(x) % self.batch_size
if remainder:
x = x[0:-remainder]
if prev_y is not None:
prev_y = prev_y[0:-remainder]
if y is not None:
y = y[0:-remainder]
return x, prev_y, y
def check_nans(self, data, input_x, input_y, label_y):
"""Checks whether anns are present or not and checks shapes of arrays
being prepared."""
if isinstance(data, pd.DataFrame):
nans = data[self.output_features].isna()
nans = nans.sum().sum()
data = data.values
else:
nans = np.isnan(data[:, -self.num_outs:])
# df[self.out_cols].isna().sum()
nans = int(nans.sum())
if nans > 0:
if self.allow_nan_labels == 2:
if self.verbosity > 0: print("""
\n{} Allowing NANs in predictions {}\n""".format(10 * '*', 10 * '*'))
elif self.allow_nan_labels == 1:
if self.verbosity > 0: print("""
\n{} Ignoring examples whose all labels are NaNs {}\n
""".format(10 * '*', 10 * '*'))
idx = ~np.array([all([np.isnan(x) for x in label_y[i]]) for i in range(len(label_y))])
input_x = input_x[idx]
input_y = input_y[idx]
label_y = label_y[idx]
if int(np.isnan(data[:, -self.num_outs:][0:self.lookback]).sum() / self.num_outs) >= self.lookback:
self.nans_removed_4m_st = -9999
else:
if self.verbosity > 0:
print('\n{} Removing Examples with nan in labels {}\n'.format(10 * '*', 10 * '*'))
if self.num_outs == 1:
# find out how many nans were present from start of data until
# lookback, these nans will be removed
self.nans_removed_4m_st = np.isnan(data[:, -self.num_outs:][0:self.lookback]).sum()
# find out such labels where 'y' has at least one nan
nan_idx = np.array([np.any(i) for i in np.isnan(label_y)])
non_nan_idx = np.invert(nan_idx)
label_y = label_y[non_nan_idx]
input_x = input_x[non_nan_idx]
input_y = input_y[non_nan_idx]
assert np.isnan(label_y).sum() < 1, """
label still contains {} nans""".format(np.isnan(label_y).sum())
assert input_x.shape[0] == input_y.shape[0] == label_y.shape[0], """
shapes are not same"""
if not self.allow_input_nans:
assert np.isnan(input_x).sum() == 0, """input still contains {} nans
""".format(np.isnan(input_x).sum())
return input_x, input_y, label_y
def indexify(self, data: pd.DataFrame, key):
data = data.copy()
dummy_index = False
# for dataframes
if isinstance(data.index, pd.DatetimeIndex):
index = list(map(int, np.array(data.index.strftime('%Y%m%d%H%M'))))
# datetime index
self.index_types[key] = 'dt'
original_index = pd.Series(index, index=index)
else:
try:
index = list(map(int, np.array(data.index)))
self.index_types[key] = 'int'
original_index = pd.Series(index, index=index)
except ValueError: # index may not be convertible to integer, it may be
# string values
dummy_index = np.arange(len(data), dtype=np.int64)
original_index = pd.Series(data.index, index=dummy_index)
index = dummy_index
self.index_types[key] = 'str'
self.indexes[key] = {'dummy': dummy_index,
'original': original_index}
# the 'index' column is inserted as the first column.
# This column will only be used to keep
# track of indices of train and test data.
data.insert(0, 'index', index)
self._input_features = ['index'] + self.input_features
# setattr(self, 'input_features', ['index'] + self.input_features)
self.indexes[key] = {'index': index, 'dummy_index': dummy_index,
'original': original_index}
return data
def deindexify(self, data: np.ndarray, key):
_data, _index = self.deindexify_nparray(data, key)
if self.indexes[key].get('dummy_index', None) is not None:
_index = self.indexes[key]['original'].loc[_index].values
if self.index_types[key] == 'dt':
_index = to_datetime_index(_index)
return _data, _index
def get_batches(self, data):
if self.batch_dim == "2D":
return self.get_2d_batches(data)
else:
return self.check_nans(data, *prepare_data(data,
num_outputs=self.num_outs,
**self.ts_args))
def get_2d_batches(self, data):
# need to count num_ins based upon _input_features as it considers the index column
num_ins = len(self._input_features)
if not isinstance(data, np.ndarray):
if isinstance(data, pd.DataFrame):
data = data.values
else:
raise TypeError(f"unknown data type {data.__class__.__name__} for data ")
if self.num_outs > 0:
input_x = data[:, 0:num_ins]
input_y, label_y = data[:, -self.num_outs:], data[:, -self.num_outs:]
else:
dummy_input_y = np.random.random((len(data), self.num_outs))
dummy_y = np.random.random((len(data), self.num_outs))
input_x, input_y, label_y = data[:, 0:num_ins], dummy_input_y, dummy_y
assert self.lookback == 1, """
lookback should be one for MLP/Dense layer based model, but it is {}
""".format(self.lookback)
return self.check_nans(data, input_x, input_y, np.expand_dims(label_y, axis=2))
def _make_data(self, data, indices=None, intervals=None, shuffle=False):
# if indices is not None:
# indices = np.array(indices).astype("int32")
# assert isinstance(np.array(indices), np.ndarray), "indices must be array like"
if isinstance(data, pd.DataFrame):
data = data[self._input_features + self.output_features].copy()
df = data
else:
data = data.copy()
df = data
if intervals is None:
x, prev_y, y = self.get_batches(df)
if indices is not None:
# if indices are given then this should be done after `get_batches`
# method
x = x[indices]
prev_y = prev_y[indices]
y = y[indices]
else:
xs, prev_ys, ys = [], [], []
for _st, _en in intervals:
df1 = data[_st:_en]
if df1.shape[0] > 0:
x, prev_y, y = self.get_batches(df1.values)
xs.append(x)
prev_ys.append(prev_y)
ys.append(y)
if indices is None:
x = np.vstack(xs)
prev_y = np.vstack(prev_ys)
y = np.vstack(ys)
else:
x = np.vstack(xs)[indices]
prev_y = np.vstack(prev_ys)[indices]
y = np.vstack(ys)[indices]
if shuffle:
raise NotImplementedError
if isinstance(data, pd.DataFrame) and 'index' in data:
data.pop('index')
if self.ts_args['forecast_len'] == 1 and len(self.output_features) > 0:
y = y.reshape(-1, len(self.output_features))
return x, prev_y, y
def deindexify_nparray(self, data, key):
if data.ndim == 3:
_data, index = data[..., 1:].astype(np.float32), data[:, -1, 0]
elif data.ndim == 2:
_data, index = data[..., 1:].astype(np.float32), data[:, 0]
elif data.ndim == 4:
_data, index = data[..., 1:].astype(np.float32), data[:, -1, -1, 0]
elif data.ndim == 5:
_data, index = data[..., 1:].astype(np.float32), data[:, -1, -1, -1, 0]
else:
raise NotImplementedError
if self.index_types[key] != 'str':
index = np.array(index, dtype=np.int64)
return _data, index
def total_exs(self,
lookback,
forecast_step=0, forecast_len=1,
**ts_args
):
intervals = self.intervals
input_steps = self.ts_args['input_steps']
data = consider_intervals(self.data, intervals)
num_outs = len(self.output_features) if self.output_features is not None else None
max_tot_obs = 0
if not self.allow_nan_labels and intervals is None:
_data = data[self.input_features + self.output_features] if isinstance(data, pd.DataFrame) else data
x, _, _ = prepare_data(_data,
lookback, num_outputs=num_outs,
forecast_step=forecast_step,
forecast_len=forecast_len, mask=np.nan, **ts_args)
max_tot_obs = len(x)
# we need to ignore some values at the start
more = (lookback * input_steps) - 1
if isinstance(data, np.ndarray):
return len(data) - more
# todo, why not when allow_nan_labels>0?
if forecast_step > 0:
more += forecast_step
if forecast_len > 1:
more += forecast_len
if intervals is None: intervals = [()]
more *= len(intervals)
if self.allow_nan_labels == 2:
tot_obs = data.shape[0] - more
elif self.allow_nan_labels == 1:
label_y = data[self.output_features].values
idx = ~np.array([all([np.isnan(x) for x in label_y[i]]) for i in range(len(label_y))])
tot_obs = np.sum(idx) - more
else:
if num_outs == 1:
tot_obs = data.shape[0] - int(data[self.output_features].isna().sum()) - more
tot_obs = max(tot_obs, max_tot_obs)
else:
# count by dropping all the rows where nans occur in output features
tot_obs = len(data.dropna(subset=self.output_features))
tot_obs -= more
return tot_obs
def KFold_splits(self, n_splits=5):
"""returns an iterator for kfold cross validation.
The iterator yields two tuples of training and test x,y pairs.
The iterator on every iteration returns the following
`(train_x, train_y), (test_x, test_y)`
Note: only `training_data` and `validation_data` are used to make kfolds.
Example
---------
>>> import numpy as np
>>> import pandas as pd
>>> from ai4water.preprocessing import DataSet
>>> data = pd.DataFrame(np.random.randint(0, 10, (20, 3)), columns=['a', 'b', 'c'])
>>> data_set = DataSet(data=data)
>>> kfold_splits = data_set.KFold_splits()
>>> for (train_x, train_y), (test_x, test_y) in kfold_splits:
... print(train_x, train_y, test_x, test_y)
"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
kf = KFold(n_splits=n_splits,
random_state=self.seed if self.shuffle else None,
shuffle=self.shuffle)
spliter = kf.split(x)
for tr_idx, test_idx in spliter:
yield (x[tr_idx], y[tr_idx]), (x[test_idx], y[test_idx])
def LeaveOneOut_splits(self):
"""Yields leave one out splits
The iterator on every iteration returns the following
`(train_x, train_y), (test_x, test_y)`"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
kf = LeaveOneOut()
for tr_idx, test_idx in kf.split(x):
yield (x[tr_idx], y[tr_idx]), (x[test_idx], y[test_idx])
def ShuffleSplit_splits(self, **kwargs):
"""Yields ShuffleSplit splits
The iterator on every iteration returns the following
`(train_x, train_y), (test_x, test_y)`"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
sf = ShuffleSplit(**kwargs)
for tr_idx, test_idx in sf.split(x):
yield (x[tr_idx], y[tr_idx]), (x[test_idx], y[test_idx])
def TimeSeriesSplit_splits(self, n_splits=5, **kwargs):
"""returns an iterator for TimeSeriesSplit.
The iterator on every iteration returns the following
`(train_x, train_y), (test_x, test_y)`
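Example
---------
A sketch, assuming ``data`` is a dataframe of input and output features:

>>> data_set = DataSet(data=data)
>>> for (train_x, train_y), (test_x, test_y) in data_set.TimeSeriesSplit_splits(n_splits=3):
...     print(train_x.shape, test_x.shape)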
"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
tscv = TimeSeriesSplit(n_splits=n_splits, **kwargs)
for tr_idx, test_idx in tscv.split(x):
yield (x[tr_idx], y[tr_idx]), (x[test_idx], y[test_idx])
def plot_KFold_splits(self, n_splits=5, show=True, **kwargs):
"""Plots the indices of kfold splits"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
kf = KFold(n_splits=n_splits,
random_state=self.seed if self.shuffle else None,
shuffle=self.shuffle)
spliter = kf.split(x)
self._plot_splits(spliter, x, title="KFoldCV", show=show, **kwargs)
return
def plot_LeaveOneOut_splits(self, show=True, **kwargs):
"""Plots the indices obtained from LeaveOneOut strategy"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
spliter = LeaveOneOut().split(x)
self._plot_splits(spliter=spliter,
x=x,
title="LeaveOneOutCV",
show=show,
**kwargs)
return
def plot_TimeSeriesSplit_splits(self, n_splits=5, show=True, **kwargs):
"""Plots the indices obtained from TimeSeriesSplit strategy"""
if self.teacher_forcing:
warnings.warn("Ignoring prev_y")
x, _, y = self._training_data()
spliter = TimeSeriesSplit(n_splits=n_splits, **kwargs).split(x)
self._plot_splits(spliter=spliter,
x=x,
title="TimeSeriesCV",
show=show,
**kwargs)
return
def _plot_splits(self, spliter, x, show=True, **kwargs):
splits = list(spliter)
figsize = kwargs.get('figsize', (10, 8))
legend_fs = kwargs.get('legend_fs', 20)
legend_pos = kwargs.get('legend_pos', (1.02, 0.8))
title = kwargs.get("title", "CV")
plt.close('all')
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
for ii, split in enumerate(splits):
indices = np.array([np.nan] * len(x))
indices[split[0]] = 1
indices[split[1]] = 0
ax.scatter(range(len(indices)), [ii + .5] * len(indices),
c=indices, marker='_', lw=10, cmap="coolwarm",
vmin=-.2, vmax=1.2)
yticklabels = list(range(len(splits)))
ax.set(yticks=np.arange(len(splits)) + .5, yticklabels=yticklabels)
ax.set_xlabel("Sample Index", fontsize=18)
ax.set_ylabel("CV iteration", fontsize=18)
ax.set_title(title, fontsize=20)
ax.legend([Patch(color=cmap_cv(.8)), Patch(color=cmap_cv(.02))],
['Training', 'Test'],
loc=legend_pos, fontsize=legend_fs)
if show:
plt.tight_layout()
plt.show()
return
def to_disk(self, path: str = None):
import h5py
path = path or os.getcwd()
filepath = os.path.join(path, "data.h5")
f = h5py.File(filepath, mode='w')
for k, v in self.init_paras().items():
if isinstance(v, (dict, list, tuple, float, int, str)):
f.attrs[k] = json.dumps(
v, default=jsonize).encode('utf8')
elif v is not None and k != 'data':
f.attrs[k] = v
if self.teacher_forcing:
x, prev_y, y = self.training_data()
val_x, val_prev_y, val_y = self.validation_data()
test_x, test_prev_y, test_y = self.test_data()
else:
prev_y, val_prev_y, test_prev_y = np.empty(0), np.empty(0), np.empty(0)
x, y = self.training_data()
val_x, val_y = self.validation_data()
test_x, test_y = self.test_data()
# save in disk
self._save_data_to_hdf5('training_data', x, prev_y, y, f)
self._save_data_to_hdf5('validation_data', val_x, val_prev_y, val_y, f)
self._save_data_to_hdf5('test_data', test_x, test_prev_y, test_y, f)
f.close()
return
def _save_data_to_hdf5(self, data_type, x, prev_y, y, f):
"""Saves one data_type in h5py. data_type is string indicating whether
it is training, validation or test data."""
assert x is not None
group_name = f.create_group(data_type)
container = {}
container['x'] = x
if self.teacher_forcing:
container['prev_y'] = prev_y
container['y'] = y
for name, val in container.items():
param_dset = group_name.create_dataset(name, val.shape, dtype=val.dtype)
if not val.shape:
# scalar
param_dset[()] = val
else:
param_dset[:] = val
return
@classmethod
def from_h5(cls, path):
"""Creates an instance of DataSet from .h5 file."""
import h5py
f = h5py.File(path, mode='r')
config = {}
for k, v in f.attrs.items():
if isinstance(v, str) or isinstance(v, bytes):
v = decode(v)
config[k] = v
cls._from_h5 = True
f.close()
# the data is already being loaded from h5 file so no need to save it again
# upon initialization of class
config['save'] = False
return cls(path, **config)

# end of ai4water/preprocessing/dataset/_main.py
__all__ = ['DataSetUnion']
from typing import Union
from ai4water.backend import np, os
from ._main import _DataSet
class DataSetUnion(_DataSet):
"""A Union of datasets concatenated in parallel. A DataSetUnion of four DataSets will be as follows:
=========== =========== =========== ===========
DataSet1 DataSet2 DataSet3 DataSet4
=========== =========== =========== ===========
"""
def __init__(
self,
*datasets,
stack_y : bool = False,
verbosity : int = 1,
**named_datasets
) -> None:
"""
DataSets must be passed either as positional arguments or as keyword arguments
but not both.
Parameters
----------
datasets :
DataSets to be concatenated in parallel.
stack_y : bool
whether to stack y/outputs of individual datasets as one array or not
verbosity : int
controls amount of information being printed
named_datasets :
DataSets to be concatenated in parallel.
Examples
---------
>>> import numpy as np
>>> import pandas as pd
>>> from ai4water.preprocessing import DataSet, DataSetUnion
>>> df1 = pd.DataFrame(np.random.random((100, 10)),
... columns=[f"Feat_{i}" for i in range(10)])
>>> df2 = pd.DataFrame(np.random.random((200, 10)),
... columns=[f"Feat_{i}" for i in range(10)])
>>> ds1 = DataSet(df1)
>>> ds2 = DataSet(df2)
>>> ds = DataSetUnion(ds1, ds2)
>>> train_x, train_y = ds.training_data()
>>> val_x, val_y = ds.validation_data()
>>> test_x, test_y = ds.test_data()
Note
----
DataSets must be provided either as positional arguments or as keyword arguments
using named_datasets and not both.
"""
self.stack_y = stack_y
self.verbosity = verbosity
self.as_list = False
self.as_dict = False
self._datasets = {}
if datasets:
assert not named_datasets, f"""provide either datasets or named_datasets, not both"""
self.as_list = True
for idx, ds in enumerate(datasets):
assert isinstance(ds, _DataSet), f"""
{ds} is not a valid dataset. {ds.__class__.__name__}"""
self._datasets[idx] = ds
if named_datasets:
assert not datasets, f"""provide either datasets or named_datasets, not both"""
self.as_dict = True
for name, ds in named_datasets.items():
assert isinstance(ds, _DataSet), f"""
{ds} is not a valid dataset. {ds.__class__.__name__}"""
self._datasets[name] = ds
self.examples = {}
_DataSet.__init__(self, config={}, path=os.getcwd())
self.index = 0
def __iter__(self):
return self
def __next__(self):
try:
item = self._datasets[self.index]
except KeyError:
self.index = 0
raise StopIteration
self.index += 1
return item
def __getitem__(self, item: int):
return self._datasets[item]
@property
def mode(self):
return list(set([ds.mode for ds in self._datasets.values()]))[0]
@property
def num_datasets(self) -> int:
return len(self._datasets)
@property
def teacher_forcing(self):
return all([ds.teacher_forcing for ds in self._datasets.values()])
@property
def is_binary(self):
return all([ds.is_binary for ds in self._datasets.values()])
@property
def is_multilabel(self):
return all([ds.is_multilabel for ds in self._datasets.values()])
@property
def is_multiclass(self):
return all([ds.is_multiclass for ds in self._datasets.values()])
@property
def input_features(self):
_input_features = {k:v.input_features for k,v in self._datasets.items()}
if self.as_list:
return list(_input_features.values())
return _input_features
@property
def output_features(self):
_output_features = {k:v.output_features for k,v in self._datasets.items()}
if self.as_list:
return list(_output_features.values())
return _output_features
@property
def indexes(self): # todo, needs testing
# multiple DataSets will have separate indexes so keys in idx will get updated
idx = {}
for ds in self._datasets.values():
for k, v in ds.indexes.items():
#print(ds_n, k, type(v))
idx[k] = v
return idx
@property
def ts_args(self)->dict:
_ts_args = {k:ds.ts_args for k,ds in self._datasets.items()}
return _ts_args
def training_data(self, key="train", **kwargs)->Union[list, dict]:
if self.teacher_forcing:
x, prev_y, y = self._get_x_yy('training_data', key, **kwargs)
return self.return_x_yy(x, prev_y, y, 'Training')
else:
x, y = self._get_xy('training_data', key, **kwargs)
return self.return_xy(x, y, 'Training')
def validation_data(self, key="val", **kwargs):
if self.teacher_forcing:
x, prev_y, y = self._get_x_yy('validation_data', key, **kwargs)
return self.return_x_yy(x, prev_y, y, 'Validation')
else:
x, y = self._get_xy('validation_data', key, **kwargs)
return self.return_xy(x, y, 'Validation')
def test_data(self, key="test", **kwargs):
if self.teacher_forcing:
x, prev_y, y = self._get_x_yy('test_data', key, **kwargs)
return self.return_x_yy(x, prev_y, y, 'Test')
else:
x, y = self._get_xy('test_data', key, **kwargs)
return self.return_xy(x, y, 'Test')
def _get_x_yy(self, method, key, **kwargs):
x, prev_y, y = {}, {}, {}
exs = {}
for ds_name, ds in self._datasets.items():
x[ds_name], prev_y[ds_name], y[ds_name] = getattr(ds, method)(key, **kwargs)
exs[ds_name] = {'x': len(x[ds_name]), 'y': len(y[ds_name])}
self.examples[method] = exs
if self.as_list:
return list(x.values()), list(prev_y.values()), list(y.values())
return x, prev_y, y
def _get_xy(self, method, key, **kwargs):
x, y = {}, {}
exs = {}
for ds_name, ds in self._datasets.items():
_x, _y = getattr(ds, method)(key, **kwargs)
x[ds_name] = _x
# if one ds does not return y, better to ignore its y, because when we
# stack ys together, they must be the same
if _y.size > 0:
y[ds_name] = _y
exs[ds_name] = {'x': len(x[ds_name]), 'y': len(y[ds_name])}
else:
exs[ds_name] = {'x': len(x[ds_name]), 'y': 0}
self.examples[method] = exs
# it is possible that y has only 1 member because all other DataSets don't have
# any y and may have already been purged.
if self.stack_y and len(y)>1:
assert np.allclose(*list(y.values()))
y = {'y': list(y.values())[0]}
if self.as_list:
return list(x.values()), list(y.values())
return x, y | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/dataset/_union.py | _union.py |
import json
from ai4water.backend import np, pd, sklearn
OneHotEncoder = sklearn.preprocessing.OneHotEncoder
def consider_intervals(data, intervals):
_source = data
if intervals is not None:
if isinstance(data, pd.DataFrame):
try: # if indices in intervals are of same type as that of index
# -1 so that .loc and .iloc give same results, however this is not possible
# with DatetimeIndex
if isinstance(data.index, pd.DatetimeIndex):
_source = pd.concat([data.loc[st:en] for st, en in intervals])
else:
_source = pd.concat([data.loc[st:en - 1] for st, en in intervals])
except TypeError: # assuming indices in intervals are integers
_source = pd.concat([data.iloc[st:en] for st, en in intervals])
return _source
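# For example, with an integer-indexed dataframe ``df`` of 40 rows,
# consider_intervals(df, [(0, 10), (20, 30)]) concatenates rows 0-9 and 20-29.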
def load_data_from_hdf5(data_type, data):
import h5py
f = h5py.File(data, mode='r')
g = f[data_type]
weight_names = list(g.keys())
weight_values = [np.asarray(g[weight_name]) for weight_name in weight_names]
f.close()
return weight_values
def check_for_classification(label: np.ndarray, to_categorical):
assert isinstance(label, np.ndarray), f"""
classification problem for label of type {label.__class__.__name__} not implemented yet"""
# for classification, the label should be 2d
label = label.reshape(-1, label.shape[1])
if to_categorical:
assert label.shape[1] == 1
label = OneHotEncoder(sparse=False).fit_transform(label)
# else: # mutlti_label/binary problem
# # todo, is only binary_crossentropy is binary/multi_label problem?
# pass #assert self.loss_name() in ['binary_crossentropy']
return label
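# For example, check_for_classification(np.array([[0], [1], [2]]), to_categorical=True)
# returns a (3, 3) one-hot encoded array; with to_categorical=False the (2d) label
# is returned unchanged.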
def decode(json_string):
return json.loads(json_string, object_hook=_decode_helper)
def _decode_helper(obj):
"""A decoding helper that is TF-object aware."""
if isinstance(obj, dict) and 'class_name' in obj:
if obj['class_name'] == '__tuple__':
return tuple(_decode_helper(i) for i in obj['items'])
elif obj['class_name'] == '__ellipsis__':
return Ellipsis
return obj | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/dataset/utils.py | utils.py |
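# For example, decode('{"class_name": "__tuple__", "items": [1, 2]}') returns (1, 2).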
__all__ = ['DataSetPipeline']
from ai4water.backend import np, os
from ._main import _DataSet
class DataSetPipeline(_DataSet):
"""A collection of DataSets concatenated one after the other. A DataSetPipeLine
of four DataSets will be as follows:
+----------+
| DataSet1 |
+----------+
| DataSet2 |
+----------+
| DataSet3 |
+----------+
| DataSet4 |
+----------+
The only condition for different datasets is that they have the same output dimension.
"""
def __init__(
self,
*datasets: _DataSet,
verbosity=1
) -> None:
"""
Parameters
----------
*datasets :
the datasets to be combined
verbosity :
controls the output information being printed.
Examples
---------
>>> import pandas as pd
>>> from ai4water.preprocessing import DataSet, DataSetPipeline
>>> df1 = pd.DataFrame(np.random.random((100, 10)),
... columns=[f"Feat_{i}" for i in range(10)])
>>> df2 = pd.DataFrame(np.random.random((200, 10)),
... columns=[f"Feat_{i}" for i in range(10)])
>>> ds1 = DataSet(df1)
>>> ds2 = DataSet(df2)
>>> ds = DataSetPipeline(ds1, ds2)
>>> train_x, train_y = ds.training_data()
>>> val_x, val_y = ds.validation_data()
>>> test_x, test_y = ds.test_data()
"""
self.verbosity = verbosity
self._datasets = []
for ds in datasets:
ds.verbosity = 0
assert isinstance(ds, _DataSet), f"""
{ds} is not a valid dataset"""
self._datasets.append(ds)
self.examples = {}
_DataSet.__init__(self, config={}, path=os.getcwd())
self.index = 0
def __iter__(self):
return self
def __next__(self):
try:
item = self._datasets[self.index]
except IndexError:
self.index = 0
raise StopIteration
self.index += 1
return item
def __getitem__(self, item:int):
return self._datasets[item]
@property
def num_datasets(self) -> int:
return len(self._datasets)
@property
def teacher_forcing(self):
return all([ds.teacher_forcing for ds in self._datasets])
@property
def mode(self):
return all([ds.mode for ds in self._datasets])
@property
def is_binary(self):
return all([ds.is_binary for ds in self._datasets])
@property
def input_features(self):
_input_features = [ds.input_features for ds in self._datasets]
return _input_features
@property
def output_features(self):
_output_features = [ds.output_features for ds in self._datasets]
return _output_features
def training_data(self, key="train", **kwargs):
if self.teacher_forcing:
x, prev_y, y = self._get_x_yy('training_data')
return x, prev_y, y
else:
x, y = self._get_xy('training_data')
return self.return_xy(x, y, "Training")
def validation_data(self, key="val", **kwargs):
if self.teacher_forcing:
x, prev_y, y = self._get_x_yy('validation_data')
return x, prev_y, y
else:
x, y = self._get_xy('validation_data')
return self.return_xy(x, y, "Validation")
def test_data(self, key="test", **kwargs):
if self.teacher_forcing:
x, prev_y, y = self._get_x_yy('test_data')
return x, prev_y, y
else:
x, y = self._get_xy('test_data')
return self.return_xy(x, y, "Test")
def _get_x_yy(self, method):
x, prev_y, y = [], [], []
exs = {}
for idx, ds in enumerate(self._datasets):
_x, _prev_y, _y = getattr(ds, method)()
x.append(_x)
prev_y.append(_prev_y)
y.append(_y)
exs[idx] = {'x': len(x), 'y': len(y)}
self.examples[method] = exs
if not all([i.size for i in x]):
x = conform_shape(x)
prev_y = conform_shape(prev_y)
y = conform_shape(y)
return np.row_stack(x), np.row_stack(prev_y), np.row_stack(y)
def _get_xy(self, method):
x, y = [], []
exs = {}
for idx, ds in enumerate(self._datasets):
_x, _y = getattr(ds, method)()
x.append(_x)
y.append(_y)
exs[idx] = {'x': len(x), 'y': len(y)}
self.examples[method] = exs
if not all([i.size for i in x]):
x = conform_shape(x)
y = conform_shape(y)
return np.row_stack(x), np.row_stack(y)
def conform_shape(alist:list):
desired_shape = list([i.shape for i in alist if i.size != 0][0])
desired_shape[0] = 0
return [np.zeros(desired_shape) if arr.size == 0 else arr for arr in alist] | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/dataset/_pipeline.py | _pipeline.py |
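# For example, conform_shape([np.empty(0), np.ones((4, 3))]) returns the list
# [array of shape (0, 3), array of shape (4, 3)] so that np.row_stack can
# concatenate the datasets without shape errors.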
from typing import Union
from sklearn.utils.validation import assert_all_finite
from ai4water.backend import np, pd, plt, stats
from ai4water.backend import easy_mpl as em
from ai4water.utils.utils import dateandtime_now, deepcopy_dict_without_clone
from ._transformations import MinMaxScaler, PowerTransformer, QuantileTransformer, StandardScaler
from ._transformations import LogScaler, Log10Scaler, Log2Scaler, TanScaler, SqrtScaler, CumsumScaler
from ._transformations import FunctionTransformer, RobustScaler, MaxAbsScaler
from ._transformations import ParetoTransformer
from ._transformations import VastTransformer
from ._transformations import MmadTransformer
from ._transformations import Center
from ._transformations import HyperbolicTangentTransformer
from ._transformations import LogisticSigmoidTransformer
from .utils import InvalidTransformation, TransformerNotFittedError, SP_METHODS
# TODO add more transformers.
# which transformation to use? Some related articles/posts
# https://scikit-learn.org/stable/modules/preprocessing.html
# http://www.faqs.org/faqs/ai-faq/neural-nets/part2/section-16.html
# https://data.library.virginia.edu/interpreting-log-transformations-in-a-linear-model/
class TransformationsContainer(object):
def __init__(self):
self.transformer_ = None
self.transforming_straight = True
self.index = None
INITIATED_TRANSFORMERS = {
'log': LogScaler(),
'log2': Log2Scaler(),
'log10': Log10Scaler(),
'sqrt': SqrtScaler()
}
class _Processor(object):
def __init__(self,
replace_zeros,
replace_zeros_with,
treat_negatives,
features=None
):
self.replace_zeros = replace_zeros
self.replace_zeros_with = replace_zeros_with
self.treat_negatives = treat_negatives
self.features = features
self.index = None
def preprocess(self, data, transforming_straight=True):
"""Makes sure that data is dataframe and optionally replaces nans"""
data = to_dataframe(data)
# save the index if not already saved so that can be used later
if self.index is None:
self.index = data.index
columns = self.features or data.columns
indices = {}
if self.replace_zeros and transforming_straight:
# instead of saving indices with column names, using column indices
# because df.iloc[row_idx, col_idx] is better than df[col_name].iloc[row_idx]
for col_idx, col in enumerate(columns):
# find indices containing 0s in the current column of the dataframe
i = data.index[data[col] == 0.0]
if len(i) > 0:
indices[col_idx] = i.values
if self.replace_zeros_with in ['mean', 'max', 'min']:
replace_with = float(getattr(np, 'nan' + self.replace_zeros_with)(data[col]))
else:
replace_with = self.replace_zeros_with
data.loc[indices[col_idx], col] = get_val(data[col], replace_with)
#if self.zero_indices is None:
self.zero_indices_ = indices
indices = {}
if self.treat_negatives:
for col_idx, col in enumerate(columns):
# find indices containing negative values in the current column of the dataframe
i = data.index[data[col] < 0.0]
if len(i) > 0:
indices[col_idx] = i.values
# turn -ve values into positives
data[col] = data[col].abs()
self.negative_indices_ = indices
return data
def postprocess(self, data):
"""If nans/zeros were replaced with some value, put nans/zeros back."""
data = data.copy()
if self.replace_zeros:
if hasattr(self, 'zero_indices_'):
for col, idx in self.zero_indices_.items():
data.iloc[idx, col] = 0.0
if self.treat_negatives:
if hasattr(self, 'negative_indices_'):
for col, idx in self.negative_indices_.items():
# invert the sign of those values which were originally -ve
for _idx in idx:
data.iat[_idx, col] = -data.iat[_idx, col]
return data
class Transformation(TransformationsContainer):
"""
Applies transformation to tabular data. It is also possible to apply transformation
on some selected features/columns of data. This class also performs some optional
pre-processing on data before applying transformation on it.
Any new transforming method should define two methods, one starting with
`transform_with_` and the other with `inverse_transform_with_`
Currently the following methods are available for transformation and inverse transformation
Transformation methods
- ``minmax``
- ``maxabs``
- ``robust``
- ``power`` same as yeo-johnson
- ``yeo-johnson`` power transformation using Yeo-Johnson method
- ``box-cox`` power transformation using box-cox method
- ``zscore`` also known as standard scalers
- ``scale`` division by standard deviation
- ``center`` by subtracting mean
- ``quantile``
- ``quantile_normal`` quantile with normal distribution as target
- ``log`` natural logarithm
- ``log10`` log with base 10
- ``log2`` log with base 2
- ``sqrt`` square root
- ``tan`` tangent
- ``cumsum`` cumulative sum
- ``mmad`` median and median absolute deviation
- ``pareto``
- ``vast`` Variable Stability Scaling
- ``sigmoid`` logistic sigmoid
- ``tanh`` hyperbolic tangent
To transform a dataframe using any of the above methods use
Examples:
>>> from ai4water.preprocessing import Transformation
>>> transformer = Transformation(method='zscore')
>>> transformer.fit_transform(data=[1,2,3,5])
or
>>> transformer = Transformation(method='minmax')
>>> normalized_df = transformer.fit_transform(data=pd.DataFrame([1,2,3]))
>>> transformer = Transformation(method='log', replace_zeros=True)
>>> trans_df, proc = transformer.fit_transform(data=pd.DataFrame([1,0,2,3]),
>>> return_proc=True)
>>> detransfomred_df = transformer.inverse_transform(trans_df, postprocessor=proc)
or using one liner
>>> normalized_df = Transformation(method='minmax',
... features=['a'])(data=pd.DataFrame([[1,2],[3,4], [5,6]],
... columns=['a', 'b']))
where ``method`` can be any of the above mentioned methods.
Note
------
``tan`` and ``cumsum`` do not return the original data upon inverse transformation,
while ``sigmoid`` and ``tanh`` do not support inverse transformation at all.
.. _google:
https://developers.google.com/machine-learning/data-prep/transform/normalization
"""
available_transformers = {
"minmax": MinMaxScaler,
"zscore": StandardScaler,
"center": Center,
"scale": StandardScaler,
"robust": RobustScaler,
"maxabs": MaxAbsScaler,
"power": PowerTransformer,
"yeo-johnson": PowerTransformer,
"box-cox": PowerTransformer,
"quantile": QuantileTransformer,
"quantile_normal": QuantileTransformer,
"log": LogScaler,
"log10": Log10Scaler,
"log2": Log2Scaler,
"sqrt": SqrtScaler,
"tan": TanScaler,
"cumsum": CumsumScaler,
"vast": VastTransformer,
"pareto": ParetoTransformer,
"mmad": MmadTransformer,
"sigmoid": LogisticSigmoidTransformer,
"tanh": HyperbolicTangentTransformer,
}
def __init__(self,
method: str = 'minmax',
features: list = None,
replace_zeros: bool = False,
replace_zeros_with: Union[str, int, float] = 1,
treat_negatives: bool = False,
**kwargs
):
"""
Arguments:
method : method by which to transform and consequently inversely
transform the data. default is 'minmax'. see `Transformations.available_transformers`
for full list.
features : string or list of strings. Only applicable if `data` is
dataframe. It defines the columns on which we want to apply transformation.
The remaining columns will remain unchanged.
replace_zeros : If True, the zero values in data will be replaced with
the fixed value `replace_zeros_with` before transformation. The zero values
will be put back at their places after transformation, so this replacement
is done only to avoid errors during transformation, for example during Box-Cox.
replace_zeros_with : if replace_zeros is True, then this value will be used
to replace zeros in the dataframe before doing transformation. You can also
define the method with which to replace the zeros, for example setting
this argument to 'mean' will replace zeros with the 'mean' of the
array/column which contains the zeros. Allowed string values are
'mean', 'max', 'min'. see_
treat_negatives:
If true, and if data contains negative values, then the absolute
values of these negative values will be considered for transformation.
For inverse transformation, the -ve sign is put back, to return the
original data. This option is necessary for log, sqrt and box-cox
transformations with -ve values in data.
kwargs : any arguments which are to be provided to the transformer on
INITIALIZATION and not during transform or inverse transform
Example:
>>> from ai4water.preprocessing.transformations import Transformation
>>> from ai4water.datasets import busan_beach
>>> df = busan_beach()
>>> inputs = ['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm', 'pcp3_mm']
>>> transformer = Transformation(method='minmax', features=['sal_psu', 'air_temp_c'])
>>> new_data = transformer.fit_transform(df[inputs])
Following shows how to apply log transformation on an array containing zeros
by making use of the argument `replace_zeros`. The zeros in the input array
will be replaced internally but will be inserted back afterwards.
>>> from ai4water.preprocessing.transformations import Transformation
>>> transformer = Transformation(method='log', replace_zeros=True)
>>> transformed_data = transformer.fit_transform([1,2,3,0.0, 5, np.nan, 7])
... [0.0, 0.6931, 1.0986, 0.0, 1.609, None, 1.9459]
>>> original_data = transformer.inverse_transform(data=transformed_data)
.. _see:
https://stats.stackexchange.com/a/222237/338323
"""
super().__init__()
if method not in self.available_transformers.keys():
raise InvalidTransformation(method, list(self.available_transformers.keys()))
self.method = method
self.replace_zeros = replace_zeros
self.replace_zeros_with = replace_zeros_with
self.treat_negatives = treat_negatives
self.features = features
self.kwargs = kwargs
self.transformed_features = None
if self.transformer_ is None: # self.transformer_ can be set during from_config
_kwargs = {}
if self.method == "scale":
_kwargs['with_mean'] = False
elif self.method == "box-cox":
_kwargs['method'] = "box-cox"
elif self.method == "quantile_normal":
_kwargs["output_distribution"] = "normal"
for k,v in self.kwargs.items():
if k in _kwargs:
_kwargs.pop(k)
transformer = self.get_transformer()(**_kwargs, **kwargs)
self.transformer_ = transformer
def __call__(self, data, what="fit_transform", return_proc=False, **kwargs):
"""
Calls the `fit_transform` and `inverse_transform` methods.
"""
if what.startswith("fit"):
self.transforming_straight = True
return self.fit_transform(data, return_proc=return_proc, **kwargs)
elif what.startswith("inv"):
self.transforming_straight = False
return self.inverse_transform(data, **kwargs)
else:
raise ValueError(f"The class Transformation can not be called with keyword argument 'what'={what}")
@property
def features(self):
return self._features
@features.setter
def features(self, x):
if x is not None:
assert len(x) == len(set(x)), f"duplicated features are not allowed. Features are: {x}"
self._features = x
@property
def transformed_features(self):
return self._transformed_features
@transformed_features.setter
def transformed_features(self, x):
self._transformed_features = x
@property
def num_features(self):
return len(self.features)
def get_transformer(self):
return self.available_transformers[self.method.lower()]
def _preprocess(self, data):
self.transforming_straight = True
proc = _Processor(self.replace_zeros,
self.replace_zeros_with,
self.treat_negatives,
features=self.features
)
data = proc.preprocess(data.copy())
if self.features is None:
self.features = list(data.columns)
setattr(self, 'initial_shape_', data.shape)
to_transform = self.get_features(data)
if self.method.lower() in ["log", "log10", "log2"]:
if (to_transform.values < 0).any():
raise InvalidValueError(self.method, "negative")
return to_transform, proc
def fit(self, data, **kwargs):
"""fits the data according the transformation methods."""
to_transform, proc = self._preprocess(data)
if self.method in ['power', 'yeo-johnson', 'box-cox']:
# a = np.array([87.52, 89.41, 89.4, 89.23, 89.92], dtype=np.float32).reshape(-1,1)
# power transformers sometimes overflow with small data which causes inf error
to_transform = to_transform.astype("float64")
return self.transformer_.fit(to_transform.values, **kwargs)
def transform(self, data, return_proc=False, **kwargs):
"""transforms the data according to fitted transformers."""
original_data = to_dataframe(data.copy())
to_transform, proc = self._preprocess(data)
if self.method in ['power', 'yeo-johnson', 'box-cox']:
# a = np.array([87.52, 89.41, 89.4, 89.23, 89.92], dtype=np.float32).reshape(-1,1)
# power transformers sometimes overflow with small data which causes inf error
to_transform = to_transform.astype("float64")
data = self.transformer_.transform(to_transform.values, **kwargs)
return self._postprocess(data, to_transform, original_data, proc, return_proc)
def fit_transform(self, data, return_proc=False, **kwargs):
"""
Transforms the data
Arguments:
data : a dataframe or numpy ndarray or array like. The transformed or inversely
transformed value will have the same type as data and will have
the same index as data (in case data is dataframe). The shape of
`data` is supposed to be (num_examples, num_features).
return_proc : whether to return the preprocessor or not. If True, then a
tuple is returned whose first element is the transformed data and second is the preprocessor.
kwargs :
"""
original_data = to_dataframe(data.copy())
to_transform, proc = self._preprocess(data)
try:
data = self.transformer_.fit_transform(to_transform.values, **kwargs)
except ValueError as e:
raise ValueError(f"Transformation {self.method} of {self.features} features raised {e}")
return self._postprocess(data, to_transform, original_data, proc, return_proc)
def _postprocess(self, data, to_transform, original_data, proc, return_proc):
data = pd.DataFrame(data, columns=to_transform.columns)
data = self.maybe_insert_features(original_data, data)
data = proc.postprocess(data)
if return_proc:
return data, proc
return data
def inverse_transform(self,
data,
postprocessor:_Processor=None,
without_fit=False,
**kwargs):
"""
Inverse transforms the data.
Parameters
---------
data:
    the data which is to be inversely transformed
postprocessor :
    the `_Processor` object returned by `fit_transform` when `return_proc=True`
without_fit : bool
    whether to inverse transform without first fitting the transformer
kwargs : any of the following keyword arguments
    - data: data on which to apply inverse transformation
    - key : key to fetch transformer
    - transformer : transformer to use for inverse transformation. If not given, then
      the available transformer is used.
"""
self.transforming_straight = False
# during transform, we convert to df even when input is list or np array
# which inserts columns/features into data.
data = to_dataframe(data)
if self.treat_negatives and hasattr(postprocessor, "negative_indices_"):
for col, idx in postprocessor.negative_indices_.items():
data.iloc[idx, col] = -data.iloc[idx, col]
if 'transformer' in kwargs:
transformer = kwargs['transformer']
elif self.transformer_ is not None:
transformer = self.transformer_
elif self.method in SP_METHODS:
transformer = INITIATED_TRANSFORMERS[self.method]
without_fit = True
else:
raise TransformerNotFittedError()
if self.treat_negatives and hasattr(self, "negative_indices_"):
for col, idx in self.negative_indices_.items():
data.iloc[idx, col] = -data.iloc[idx, col]
self.transforming_straight = False
original_data = data.copy()
to_transform = self.get_features(data)
if without_fit:
data = transformer.inverse_transform_without_fit(to_transform)
else:
data = transformer.inverse_transform(to_transform.values)
data = pd.DataFrame(data, columns=to_transform.columns)
data = self.maybe_insert_features(original_data, data)
if postprocessor is not None:
data = postprocessor.postprocess(data)
return data
def get_features(self, data) -> pd.DataFrame:
if self.features is None:
return data
else:
assert isinstance(self.features, list)
return data[self.features]
def serialize_transformer(self, transformer):
key = self.method + str(dateandtime_now())
serialized_transformer = {
"transformer": transformer,
"key": key
}
self.transformer_ = transformer
return serialized_transformer
def get_transformer_from_dict(self, **kwargs):
if 'transformer' in kwargs:
transformer = kwargs['transformer']
else:
raise TransformerNotFittedError()
return transformer
def maybe_insert_features(self, original_df, trans_df):
trans_df.index = original_df.index
num_features = len(original_df.columns)
if len(trans_df.columns) != num_features:
df = pd.DataFrame(index=original_df.index)
for col in original_df.columns: # features:
if col in trans_df.columns:
_df = trans_df[col]
else:
_df = original_df[col]
df = pd.concat([df, _df], axis=1)
else:
df = trans_df
assert df.shape == original_df.shape, f"shape changed from {original_df.shape} to {df.shape}"
return df
def config(self)->dict:
"""returns a dictionary which can be used to reconstruct `Transformation`
class using `from_config`.
Returns:
a dictionary
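Example:
    a minimal sketch of the intended round trip
    >>> t = Transformation(method='minmax')
    >>> _ = t.fit_transform([1, 2, 3, 4])
    >>> conf = t.config()
    >>> new_t = Transformation.from_config(conf)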
"""
assert self.transformer_ is not None, f"Transformation is not fitted yet"
return {
"transformer": {self.method: self.transformer_.config()},
"shape": self.initial_shape_,
"method": self.method,
"features": self.features,
"replace_zeros": self.replace_zeros,
"replace_zeros_with": self.replace_zeros_with,
"treat_negatives": self.treat_negatives,
"kwargs": self.kwargs,
}
@classmethod
def from_config(
cls,
config:dict
)-> "Transformation":
"""constructs the `Transformation` class from `config` which has
already been fitted/transformed.
Arguments:
config:
a dictionary which is the output of the `config()` method.
Returns:
an instance of `Transformation` class.
"""
config = deepcopy_dict_without_clone(config)
shape = config.pop('shape')
transformer = config.pop('transformer')
assert len(transformer) == 1
transformer_name = list(transformer.keys())[0]
transformer_config = list(transformer.values())[0]
if 'kwargs' in config:
kwargs = config.pop('kwargs')
transformer = cls(**config, **kwargs)
# initiate the transformer
tr_initiated = transformer.available_transformers[transformer_name].from_config(transformer_config)
transformer.transformer_ = tr_initiated
transformer.initial_shape_ = shape
return transformer
def plot_comparison(
self,
data,
plot_type:str = "hist",
show:bool=True,
figsize:tuple = None,
**kwargs
)->plt.Figure:
"""
compares original and transformed data
Parameters
----------
data :
the data on which to apply transformation. It can be a list, numpy array or pandas dataframe
plot_type : str, optional (default="hist")
either ``hist``, ``probplot`` or ``line``
show : bool, optional (default=True)
whether to show the plot or not
figsize : tuple, optional (default=None)
figure size (width, height)
**kwargs :
any keyword arguments for easy_mpl.hist or easy_mpl.plot when
plot_type is "hist" or "probplot" respectively.
Returns
-------
plt.Figure
Examples
--------
>>> from ai4water.preprocessing import Transformation
>>> import numpy as np
>>> t = Transformation()
>>> t.plot_comparison(np.random.randint(1, 100, (100, 2)))
... # compare using probability plot
>>> t.plot_comparison(np.random.randint(1, 100, (100, 2)), "probplot")
... # or a simple line plot
>>> t.plot_comparison(np.random.randint(1, 100, (100, 2)), "line", figsize=(14, 6))
"""
x_ = self.fit_transform(data)
funcs = {
"hist": hist,
"probplot": probplot,
"line": plot
}
func = funcs[plot_type]
if len(x_) == x_.size:
# it is 1d
fig, axes = plt.subplots(1, 2, figsize=figsize)
func(data, ax=axes[0], **kwargs, ax_kws=dict(title="original"), show=False)
func(x_, ax=axes[1], **kwargs, ax_kws=dict(title="Transformed"), show=False)
else:
fig, axes = plt.subplots(x_.shape[1], 2, figsize=figsize)
if isinstance(data, pd.DataFrame):
data = data.values
for idx in range(len(axes)):
title1, title2 = None, None
if idx == 0:
title1, title2 = "Original", "Transformed"
func(data[:, idx], ax=axes[idx, 0], ax_kws=dict(title=title1),
show=False, **kwargs)
func(x_.iloc[:, idx], ax=axes[idx, 1], ax_kws=dict(title=title2),
show=False, **kwargs)
plt.suptitle(self.method)
if show:
plt.show()
return fig
def hist(x, ax, **kwargs):
return em.hist(x, ax=ax, **kwargs)
def plot(x, ax, **kwargs):
# make sure that it is 1D
x = np.array(x)
assert len(x) == np.size(x)
x = x.reshape(-1,)
return em.plot(x, ax=ax, **kwargs)
def probplot(x, ax, **kwargs):
# make sure that it is 1D
x = np.array(x)
assert len(x) == np.size(x)
x = x.reshape(-1,)
(osm, osr), (slope, intercept, r) = stats.probplot(x,
dist="norm",
plot=ax)
return em.plot(osm, osr, ax=ax, **kwargs)
def get_val(df: pd.DataFrame, method):
if isinstance(method, str):
if method.lower() == "mean":
return df.mean()
elif method.lower() == "max":
return df.max()
elif method.lower() == "min":
return df.min()
elif isinstance(method, int) or isinstance(method, float):
return method
else:
raise ValueError(f"unknown method {method} to replace nan vlaues")
class InvalidValueError(Exception):
def __init__(self, method, reason):
self.method = method
self.reason = reason
def remedy(self):
if self.reason == "NaN":
return "Try setting 'replace_nans' to True"
elif self.reason == "zero":
return "Try setting 'replace_zeros' to True"
elif self.reason == "negative":
return "Try setting 'treat_negatives' to True"
def __str__(self):
return (f"""
Input data contains {self.reason} values so {self.method} transformation
can not be applied.
{self.remedy()}
""")
def to_dataframe(data)->pd.DataFrame:
if isinstance(data, pd.DataFrame):
data = data
else:
data = np.array(data)
if data.ndim == 1:
data = data.reshape(-1, 1)
assert isinstance(data, np.ndarray)
data = pd.DataFrame(data, #columns=['data' + str(i) for i in range(data.shape[1])]
)
return data | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/transformations/_main.py | _main.py |
from typing import List
from scipy.special import boxcox
from scipy.special import expit
from ai4water.backend import np, sklearn
from ai4water.utils.utils import jsonize
SKMinMaxScaler = sklearn.preprocessing.MinMaxScaler
SKStandardScaler = sklearn.preprocessing.StandardScaler
SKRobustScaler = sklearn.preprocessing.RobustScaler
SKPowerTransformer = sklearn.preprocessing.PowerTransformer
SKQuantileTransformer = sklearn.preprocessing.QuantileTransformer
SKFunctionTransformer = sklearn.preprocessing.FunctionTransformer
SKMaxAbsScaler = sklearn.preprocessing.MaxAbsScaler
check_is_fitted = sklearn.utils.validation.check_is_fitted
# todo
# inverse hyperbolic transformation: effective with many zeros
class ScalerWithConfig(object):
"""Extends the sklearn's scalers in such a way that they can be
saved to a json file an d loaded from a json file
Methods
--------
- config
- form_config
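Examples
--------
A minimal sketch of the intended round trip, using the MinMaxScaler
subclass defined below:
>>> import numpy as np
>>> scaler = MinMaxScaler()
>>> _ = scaler.fit_transform(np.arange(10).reshape(-1, 1))
>>> conf = scaler.config()
>>> new_scaler = MinMaxScaler.from_config(conf)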
"""
@property
def config_paras(self) -> list:
raise NotImplementedError
def get_params(self):
raise NotImplementedError
@classmethod
def from_config(cls, config: dict):
"""Build the scaler/transformer from config
Arguments:
config : dictionary of parameters which can be used to build transformer/scaler.
Returns :
An instance of scaler/transformer
"""
scaler = cls(**config['params'])
setattr(scaler, '_config', config['config'])
setattr(scaler, '_from_config', True)
for attr, attr_val in config['config'].items():
setattr(scaler, attr, attr_val)
return scaler
def config(self) -> dict:
"""Returns all the parameters in scaler/transformer in a dictionary"""
if self.__class__.__name__ == 'MyFunctionTransformer':
pass
else:
check_is_fitted(self)
_config = {}
for attr in self.config_paras:
_config[attr] = getattr(self, attr)
return {"params": self.get_params(),
"config": _config}
class MinMaxScaler(SKMinMaxScaler, ScalerWithConfig):
@property
def config_paras(self):
return ['scale_', 'min_', 'n_samples_seen_', 'data_min_', 'data_max_', 'data_range_']
class StandardScaler(SKStandardScaler, ScalerWithConfig):
@property
def config_paras(self):
return ['scale_', 'n_samples_seen_', 'mean_', 'var_', 'n_features_in_']
class RobustScaler(SKRobustScaler, ScalerWithConfig):
@property
def config_paras(self):
return ['scale_', 'center_']
class PowerTransformer(SKPowerTransformer, ScalerWithConfig):
"""This transformation enhances scikit-learn's PowerTransformer by allowing
the user to define the `lambdas` parameter for each input feature. The default
behaviour of this transformer is the same as that of scikit-learn's.
"""
def __init__(self, method='yeo-johnson', *,
rescale=False,
pre_center:bool = False,
standardize=True,
copy=True,
lambdas=None):
"""
lambdas: float or 1d array like for each feature. If not given, it is
calculated from scipy.stats.boxcox(X, lmbda=None). Only available
if method is box-cox.
pre_center:
center the data before applying power transformation. see github [1] for more discussion
rescale:
For complete documentation see scikit-learn's documentation [2]
.. [2]
https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html
.. [1]
https://github.com/scikit-learn/scikit-learn/issues/14959
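Examples
--------
A minimal sketch showing a user-defined lambda, which is only supported
for the box-cox method:
>>> import numpy as np
>>> pt = PowerTransformer(method='box-cox', lambdas=0.5)
>>> x = np.arange(1, 11, dtype=float).reshape(-1, 1)
>>> x_ = pt.fit_transform(x)
>>> x_inv = pt.inverse_transform(x_)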
"""
if lambdas is not None:
if isinstance(lambdas, float):
lambdas = np.array([lambdas])
lambdas = np.array(lambdas)
# if given, lambdas must be a 1d array
assert lambdas.size == len(lambdas)
lambdas = lambdas.reshape(-1,)
assert method != "yeo-johnson"
self.lambdas = lambdas
self.rescale = rescale
self.pre_center = pre_center
super(PowerTransformer, self).__init__(method=method,
standardize=standardize,
copy=copy)
@property
def config_paras(self):
return ['lambdas_', 'scaler_to_standardize_',
'pre_center_config_', 'rescaler_config_', 'n_features_in_']
@classmethod
def from_config(cls, config: dict):
"""Build the scaler/transformer from config
Arguments:
config : dictionary of parameters which can be used to build transformer/scaler.
Returns :
An instance of scaler/transformer
"""
scaler = cls(**config['params'])
setattr(scaler, '_config', config['config'])
setattr(scaler, '_from_config', True)
_scaler_config = config['config'].pop('scaler_to_standardize_')
setattr(scaler, '_scaler', StandardScaler.from_config(_scaler_config))
rescaler = config['config'].pop('rescaler_config_')
if rescaler:
setattr(scaler, 'rescaler_', MinMaxScaler.from_config(rescaler))
else:
setattr(scaler, 'rescaler_', None)
pre_standardizer = config['config'].pop('pre_center_config_')
if pre_standardizer:
setattr(scaler, 'pre_centerer_', Center.from_config(pre_standardizer))
else:
setattr(scaler, 'pre_centerer_', None)
for attr, attr_val in config['config'].items():
setattr(scaler, attr, attr_val)
if isinstance(scaler.lambdas_, float):
scaler.lambdas_ = [scaler.lambdas_]
return scaler
def _fit(self, X, y=None, force_transform=False):
"""copying from sklearn because we want to use our own StandardScaler
which can be serialzied. and optionally with user provided with lambda
parameter."""
X = self._check_input(X, in_fit=True, check_positive=True,
check_method=True)
if not self.copy and not force_transform: # if call from fit()
X = X.copy() # force copy so that fit does not change X inplace
X = self._maybe_rescale(X, force_transform)
X = self._maybe_precenter(X, force_transform)
optim_function = {'box-cox': self._box_cox_optimize,
'yeo-johnson': self._yeo_johnson_optimize
}[self.method]
if self.lambdas is None:
with np.errstate(invalid='ignore'): # hide NaN warnings
self.lambdas_ = np.array([optim_function(col) for col in X.T])
else: # take user defined lambdas
self.lambdas_ = self.lambdas
if self.standardize or force_transform:
transform_function = {'box-cox': boxcox,
'yeo-johnson': self._yeo_johnson_transform
}[self.method]
for i, lmbda in enumerate(self.lambdas_):
with np.errstate(invalid='ignore'): # hide NaN warnings
X[:, i] = transform_function(X[:, i], lmbda)
setattr(self, 'scaler_to_standardize_', None)
if self.standardize:
self._scaler = StandardScaler(copy=False)
if force_transform:
X = self._scaler.fit_transform(X)
else:
self._scaler.fit(X)
setattr(self, 'scaler_to_standardize_', self._scaler.config())
return X
def _maybe_rescale(self, X, force_transform):
self.rescaler_config_ = None
if self.rescale:
rescaler = MinMaxScaler()
self.rescaler_ = rescaler
if force_transform:
X = rescaler.fit_transform(X)
else:
X = rescaler.fit(X)
self.rescaler_config_ = rescaler.config()
return X
def _maybe_precenter(self, X, force_transform=False):
self.pre_center_config_ = None
if self.pre_center:
pre_centerer = Center()
self.pre_centerer_ = pre_centerer
if force_transform:
X = pre_centerer.fit_transform(X)
else:
X = pre_centerer.fit(X)
self.pre_center_config_ = pre_centerer.config()
return X
def inverse_transform(self, X):
"""Apply the inverse power transformation using the fitted lambdas.
The inverse of the Box-Cox transformation is given by::
if lambda_ == 0:
X = exp(X_trans)
else:
X = (X_trans * lambda_ + 1) ** (1 / lambda_)
The inverse of the Yeo-Johnson transformation is given by::
if X >= 0 and lambda_ == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda_ != 0:
X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1
elif X < 0 and lambda_ != 2:
X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))
elif X < 0 and lambda_ == 2:
X = 1 - exp(-X_trans)
Parameters
----------
X : array-like of shape (n_samples, n_features)
The transformed data.
Returns
-------
X : ndarray of shape (n_samples, n_features)
The original data.
"""
X = super(PowerTransformer, self).inverse_transform(X)
if self.pre_center:
X = self.pre_centerer_.inverse_transform(X)
if self.rescale:
X = self.rescaler_.inverse_transform(X)
return X
class QuantileTransformer(SKQuantileTransformer, ScalerWithConfig):
@property
def config_paras(self):
return ['n_quantiles_', 'references_', 'quantiles_']
@classmethod
def from_config(cls, config: dict):
"""Build the scaler/transformer from config
Arguments:
config : dictionary of parameters which can be used to build transformer/scaler.
Returns :
An instance of scaler/transformer
"""
scaler = cls(**config['params'])
setattr(scaler, '_config', config['config'])
setattr(scaler, '_from_config', True)
scaler.n_quantiles_ = config['config']['n_quantiles_']
scaler.references_ = np.array(config['config']['references_'])
quantiles_ = np.array(config['config']['quantiles_'])
# make sure it is 2d
quantiles_ = quantiles_.reshape(len(quantiles_), -1)
scaler.quantiles_ = quantiles_
return scaler
class MaxAbsScaler(SKMaxAbsScaler, ScalerWithConfig):
@property
def config_paras(self):
return ['scale_', 'n_samples_seen_', 'max_abs_']
class Center(ScalerWithConfig):
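"""Centers the data by subtracting the mean computed along ``axis`` so
that the transformed data has zero mean; ``inverse_transform`` adds the
mean back."""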
def __init__(
self,
feature_dim="2d",
axis=0
):
self.feature_dim = feature_dim
self.axis = axis
def fit(self, x:np.ndarray):
dim = x.ndim
mean = np.nanmean(x, axis=self.axis)
setattr(self, 'mean_', mean)
setattr(self, 'data_dim_', dim)
return x
def transform(self, x):
return x - self.mean_
def fit_transform(self, x:np.ndarray)->np.ndarray:
self.fit(x)
return self.transform(x)
def inverse_transform(self, x:np.ndarray)->np.ndarray:
assert x.ndim == self.data_dim_
return x + self.mean_
@property
def config_paras(self):
return ['data_dim_', 'mean_']
def get_params(self):
return {'feature_dim': self.feature_dim, 'axis': self.axis}
class Closures(ScalerWithConfig):
def __init__(
self,
force_closure:bool = False,
treat_negative:bool = False
):
"""
force_closure: bool
if true, and input data is not a closure, it will be converted
into a closure by dividing it by the sum of the input data
treat_negative: bool
if true and the input contains negative values, the input is offset
using its minimum value before transformation; the offset is removed
during inverse transformation
"""
self.force_closure = force_closure
self.treat_negative = treat_negative
def _check_array(self, x):
self.sum_, self.min_ = None, None
if len(x) == x.size:
x = x.reshape(-1,)
if (x<0).sum() > 0:
if self.treat_negative:
self.min_ = np.min(x)
x = x + self.min_
else:
ValueError(f"x contains {(x[x<0]).sum()} -ve values")
if not np.allclose(x.sum(), 1.0):
if self.force_closure:
self.sum_ = np.sum(x)
x = x / self.sum_
else:
raise ValueError(f"x is not a closure with sum of {round(x.sum(), 5)}")
return x
def _check_array_inv(self, x):
if len(x) == x.size:
x = x.reshape(-1,)
if self.force_closure:
x = x * self.sum_
if self.treat_negative:
x = x - self.min_
return x
def transform(self, x):
raise NotImplementedError
def fit(self, x):
return x
def fit_transform(self, x):
return self.transform(x)
@property
def config_paras(self)->List[str]:
return ['sum_', 'min_']
def get_params(self)->dict:
return {
'force_closure': self.force_closure,
'treat_negative': self.treat_negative,
}
class ALR(Closures):
"""
Additive log ratio transformation
Examples
---------
>>> from easy_mpl import hist
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> alr_tr = ALR(True, True)
>>> x = data.iloc[:, 0].values
>>> x_ = alr_tr.fit_transform(x)
>>> _x = alr_tr.inverse_transform(x_)
>>> np.allclose(_x, x)
True
>>> hist([x, x_], hist_kws={"bins": 100}, share_axes=False,
... labels=["Original", "Transformed"])
"""
def transform(self, x):
denominator_idx = 0
x = self._check_array(x)
self.x0_ = x[denominator_idx]
if x.ndim == 2:
mat_t = x.T
numerator_idx = list(range(0, mat_t.shape[0]))
del numerator_idx[denominator_idx]
x = np.log(mat_t[numerator_idx, :] / mat_t[denominator_idx, :]).T
elif x.ndim == 1:
numerator_idx = list(range(0, x.shape[0]))
del numerator_idx[denominator_idx]
x = np.log(x[numerator_idx] / x[denominator_idx])
x = np.roll(np.append(x, self.x0_), shift=1)
else:
raise ValueError("mat must be either 1D or 2D")
return x
def inverse_transform(self, x):
denominator_idx = 0
x = np.array(x)
if x.ndim == 2:
mat_idx = np.insert(x, denominator_idx,
np.repeat(0, x.shape[0]), axis=1)
comp = np.zeros(mat_idx.shape)
comp[:, denominator_idx] = 1 / (np.exp(x).sum(axis=1) + 1)
numerator_idx = list(range(0, comp.shape[1]))
del numerator_idx[denominator_idx]
for i in numerator_idx:
comp[:, i] = comp[:, denominator_idx] * np.exp(mat_idx[:, i])
elif x.ndim == 1:
mat_idx = np.insert(x, denominator_idx, 0, axis=0)
comp = np.zeros(mat_idx.shape)
comp[denominator_idx] = 1 / (np.exp(x).sum(axis=0) + 1)
numerator_idx = list(range(0, comp.shape[0]))
del numerator_idx[denominator_idx]
for i in numerator_idx:
comp[i] = comp[denominator_idx] * np.exp(mat_idx[i])
else:
raise ValueError("mat must be either 1D or 2D")
x = self._check_array_inv(x)
return x
class CLR(Closures):
"""centre log ratio transformation
Examples
---------
>>> from easy_mpl import hist
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> clr_tr = CLR(True, True)
>>> x = data.iloc[:, 0].values
>>> x_ = clr_tr.fit_transform(x)
>>> _x = clr_tr.inverse_transform(x_)
>>> np.allclose(_x, x)
True
>>> hist([x, x_], hist_kws={"bins": 100}, share_axes=False,
... labels=["Original", "Transformed"])
"""
def transform(
self,
x:np.ndarray
)->np.ndarray:
x = self._check_array(x)
lmat = np.log(x)
gm = lmat.mean(axis=-1, keepdims=True)
return (lmat - gm).squeeze()
def inverse_transform(self, x:np.ndarray)->np.ndarray:
emat = np.exp(x)
x = closure(emat, out=emat)
x = self._check_array_inv(x)
return x
class FuncTransformer(ScalerWithConfig):
"""function transformer. Transforms the array element wise."""
@property
def func(self):
raise NotImplementedError
@property
def inv_func(self):
raise NotImplementedError
def fit(self, X, y=None):
return self
def fit_transform(self, x:np.ndarray)->np.ndarray:
return self.transform(x)
def transform(self, x:np.ndarray)-> np.ndarray:
setattr(self, 'data_dim_', np.ndim(x))
return self.func(x)
def inverse_transform_without_fit(self, x):
return self._inverse_transform(x, False)
def _inverse_transform(self, x, check_dim=True):
return self.inv_func(x)
def inverse_transform(self, x):
return self._inverse_transform(x)
@property
def config_paras(self):
return ['data_dim_']
def get_params(self):
return {}
class SqrtScaler(FuncTransformer):
@property
def func(self):
return np.sqrt
@property
def inv_func(self):
return np.square
class LogScaler(FuncTransformer):
@property
def func(self):
return np.log
@property
def inv_func(self):
return np.exp
class Log2Scaler(FuncTransformer):
@property
def func(self):
return np.log2
@property
def inv_func(self):
return lambda x: np.power(2, x)
class Log10Scaler(FuncTransformer):
@property
def func(self):
return np.log10
@property
def inv_func(self):
return lambda x: np.power(10, x)
class TanScaler(FuncTransformer):
@property
def func(self):
return np.tan
@property
def inv_func(self):
return np.tanh
class LogisticSigmoidTransformer(FuncTransformer):
"""logistic sigmoid transformer.
Note that inverse transform of logistic sigmoid does not return
original array.
"""
@property
def func(self):
return expit
@property
def inv_func(self):
raise ValueError("inverse transform of sigmoid can not be computed")
class HyperbolicTangentTransformer(FuncTransformer):
"""Hyperbolic tangent"""
@property
def func(self):
return np.tanh
@property
def inv_func(self):
raise ValueError("inverse transform of tanh can not be computed")
class CumsumScaler(FuncTransformer):
def __init__(
self,
feature_dim: str = "2d"
):
"""
Arguments:
feature_dim:
whether the features are 2 dimensional or 1 dimensional. Only
relevant if the `x` to `fit_transform` is 3D. In such as case
if feature_dim is `1D`, it will be considered that the x consists
of following shape (num_examples, time_steps, num_features)
"""
assert feature_dim in ("1d", "2d")
self.feature_dim = feature_dim
def fit_transform(self, x:np.ndarray) -> np.ndarray:
self.data_dim_ = np.ndim(x)
dim = np.ndim(x)
if dim == 3 and self.feature_dim == "1d":
_x = np.full(x.shape, np.nan)
for time_step in range(x.shape[1]):
_x[:, time_step] = self.func(x[:, time_step], axis=0)
else:
_x = np.cumsum(x, axis=0)
return _x
def inverse_transform(self, x):
dim = x.ndim
assert dim == self.data_dim_, f"dimension of data changed from {self.data_dim_} to {dim}"
if dim == 3 and self.feature_dim == "1d":
_x = np.full(x.shape, np.nan)
for time_step in range(x.shape[1]):
_x[:, time_step] = np.diff(x[:, time_step], axis=0, append=0)
elif 2 <= dim < 4:
_x = np.diff(x, axis=0, append=0)
else:
raise ValueError(f" dimension {dim} not allowed")
return _x
class FunctionTransformer(SKFunctionTransformer):
"""Serializing a custom func/inverse_func is difficult. Therefore
we expect the func/inverse_func to be either a numpy function or
the code as a string.
Methods
-------
from_config
Attributes
----------
inverse_func_ser
Example
-------
>>> array = np.random.randint(1, 100, (20, 2))
>>> transformer = FunctionTransformer(func=np.log2,
>>> inverse_func="lambda _x: 2**_x", validate=True)
>>> t_array = transformer.fit_transform(array)
>>> transformer.config()
>>> new_transformer = FunctionTransformer.from_config(transformer.config())
>>> original_array = new_transformer.inverse_transform(t_array)
"""
def __init__(self, func=None, inverse_func=None, validate=False,
accept_sparse=False, check_inverse=True, kw_args=None,
inv_kw_args=None):
# if inverse_func is string, we save a serialized version of it in memory
# to save it in config later.
self.inverse_func_ser = inverse_func
super().__init__(func=func,
inverse_func=inverse_func,
validate=validate,
accept_sparse=accept_sparse,
check_inverse=check_inverse,
kw_args=kw_args,
inv_kw_args=inv_kw_args)
@property
def inverse_func(self):
return self._inverse_func
@inverse_func.setter
def inverse_func(self, func):
self._inverse_func = self.deserialize_func(func)
@property
def inverse_func_ser(self):
return self._inverse_func_ser
@inverse_func_ser.setter
def inverse_func_ser(self, func):
self._inverse_func_ser = self.serialize_func(func)
@classmethod
def from_config(cls, config: dict):
"""Build the estimator from config file"""
func = cls.deserialize_func(config.pop('func'))
# do not deserialize inverse_func here, it will be done in init method
scaler = cls(func=func, inverse_func=config.pop('inverse_func'), **cls.deserialize(**config))
setattr(scaler, '_from_config', True)
return scaler
@staticmethod
def deserialize_func(func):
if func is not None:
if isinstance(func, str):
if func in np.__dict__:
func = getattr(np, func)
else:
func = eval(func)
elif isinstance(func, np.ufunc): # np.log2
func = func
elif func.__name__ in np.__dict__: # np.diff
func = func
else:
raise ValueError(f"{func}")
return func
def config(self) -> dict:
"""Returns all the parameters in scaler in a dictionary"""
params = self.get_params()
_config = dict()
_config['func'] = self.serialize_func(self.func)
_config['inverse_func'] = self.inverse_func_ser
_config['kw_args'] = jsonize(self.kw_args)
_config['inv_kw_args'] = jsonize(self.inv_kw_args)
for k, v in params.items():
if k not in _config:
_config.update({k: v})
return _config
@staticmethod
def deserialize(**kwargs):
_kwargs = {}
for k, v in kwargs.items():
if v == "None":
v = None
_kwargs[k] = v
return _kwargs
@staticmethod
def serialize_func(func):
if type(func) == np.ufunc:
func = func.__name__
elif func.__class__.__name__ == "function" and func.__module__ == "numpy":
func = func.__name__
elif func is not None:
if isinstance(func, str):
func = f"""{func}"""
else:
raise ValueError(f"{func} is not serializable")
return func
class ParetoTransformer(ScalerWithConfig):
"""
Similar to zscore/StandardScaler, but instead of dividing by the standard
deviation, it divides by the square root of the standard deviation [11]_ and [12]_.
The standard score of a sample `x` is calculated as:
.. math::
    z = (x - u) / sqrt(s)
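Examples
--------
A minimal sketch of the intended usage:
>>> import numpy as np
>>> pt = ParetoTransformer()
>>> x = np.random.random((10, 2))
>>> x_ = pt.fit_transform(x)
>>> np.allclose(pt.inverse_transform(x_), x)
True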
"""
def __init__(
self,
feature_dim="2d",
axis=0
):
self.feature_dim = feature_dim
self.axis = axis
def _reset(self):
for arg in ['mean_', 'var_', 'scale_', 'data_dim_']:
setattr(self, arg, None)
return
def fit(self, X, y=None):
self._reset()
self.data_dim_ = np.ndim(X)
self.mean_ = np.nanmean(X, axis=self.axis)
self.scale_ = np.sqrt(np.nanvar(X, axis=self.axis))
self.var_ = np.nanvar(X, axis=self.axis)
return self
def transform(self, X, y=None):
assert np.ndim(X) == self.data_dim_
X = X - self.mean_
return X / np.sqrt(self.scale_)
def fit_transform(self, X, y=None):
return self.fit(X, y=y).transform(X)
def inverse_transform(self, X):
X = X * np.sqrt(self.scale_)
return X + self.mean_
@property
def config_paras(self):
return ['data_dim_', 'mean_', 'var_', 'scale_']
def get_params(self):
return {'feature_dim': self.feature_dim, "axis": self.axis}
class VastTransformer(ParetoTransformer):
"""
Variable Stability Scaling following the works of Nicholson et al., 2003 [11]_
and van der Berg et al., 2006 [12]_ .
The standard score of a sample `x` is calculated as:
.. math::
    z = (x - u) / s * u/s
.. [11] https://doi.org/10.1016/S0003-2670(03)00094-1
.. [12] https://doi.org/10.1186/1471-2164-7-142
"""
def transform(self, X, y=None):
assert np.ndim(X) == self.data_dim_
X = X - self.mean_
X = X / self.scale_
# coefficient of variation
cv = np.divide(self.mean_ , self.scale_)
return X * cv
def inverse_transform(self, X, y=None):
cv = np.divide(self.mean_ , self.scale_)
X = X / cv
X = X * self.scale_
X = X + self.mean_
return X
class MmadTransformer(ScalerWithConfig):
"""
Median and median absolute deviation following Jain et al., 2005 [13]_ and
Singh and Singh 2020 [14]_.
The standard score of a sample `x` is calculated as:
.. math::
    z = (x - median) / MAD
.. [13] https://doi.org/10.1016/j.patcog.2005.01.012
.. [14] https://doi.org/10.1016/j.asoc.2019.105524
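Examples
--------
A minimal sketch of the intended usage:
>>> import numpy as np
>>> tr = MmadTransformer()
>>> x = np.array([[1.0], [2.0], [3.0], [100.0]])
>>> x_ = tr.fit_transform(x)
>>> np.allclose(tr.inverse_transform(x_), x)
True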
"""
def __init__(
self,
feature_dim="1d",
axis=0
):
self.feature_dim = feature_dim
self.axis = axis
def get_params(self):
return {'feature_dim': self.feature_dim, "axis": self.axis}
def _reset(self):
for arg in ['med_', 'mad_', 'data_dim_']:
setattr(self, arg, None)
return
def fit(self, X, y=None):
"""fits the data i.e. calculates median and MAD of the data.
These parameters will be used during transform.
"""
self._reset()
self.data_dim_ = np.ndim(X)
self.med_ = np.nanmedian(X, axis=self.axis)
self.mad_ = np.nanmedian(np.absolute(X - self.med_), axis=self.axis)
return self
def transform(self, X, y=None):
"""transforms the data i.e. changes the data using the parameters calculated
during ``fit``.
"""
assert np.ndim(X) == self.data_dim_
X = X - self.med_
return X / self.mad_
def fit_transform(self, X, y=None):
"""First calls fit and then calls transform."""
return self.fit(X, y=y).transform(X)
def inverse_transform(self, X):
"""inverse transforms the X i.e. brings the X to original scale by using
the parameters calculated during ``fit``."""
X = X * self.mad_
return X + self.med_
@property
def config_paras(self):
return ['data_dim_', 'med_', 'mad_']
def closure(mat, out=None):
mat = np.atleast_2d(mat)
if out is not None:
out = np.atleast_2d(out)
if np.any(mat < 0):
raise ValueError("Cannot have negative proportions")
if mat.ndim > 2:
raise ValueError("Input matrix can only have two dimensions or less")
norm = mat.sum(axis=1, keepdims=True)
if np.any(norm == 0):
raise ValueError("Input matrix cannot have rows with all zeros")
return np.divide(mat, norm, out=out).squeeze() | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/transformations/_transformations.py | _transformations.py |
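# A minimal sketch of how ``closure`` behaves:
#
#     closure(np.array([2., 2., 6.]))    # -> array([0.2, 0.2, 0.6])
#     closure(np.array([[2., 2., 6.],
#                       [4., 4., 2.]]))  # row-wise normalisation so each row sums to 1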
from typing import Union, List, Dict
from ai4water.backend import np, pd
from ai4water.preprocessing.transformations import Transformation
from ai4water.utils.utils import jsonize, deepcopy_dict_without_clone
class Transformations(object):
"""
While the [Transformation][ai4water.preprocessing.transformations.Transformation]
class is useful to apply a single transformation to a single data source, this
class is helpful to apply multple transformations to a single data or multiple
transformations to multiple data. This class is especially designed to be applied
as part of `model` inside the `fit`, `predict` or `evaluate` methods. The
`fit_transform` method should be applied before feeding the data to the
algorithm and `inverse_transform` method should be called after algorithm has
worked with data.
Examples
--------
>>> import numpy as np
>>> from ai4water.preprocessing.transformations import Transformations
>>> x = np.arange(50).reshape(25, 2)
>>> transformer = Transformations(['a', 'b'], config=['minmax', 'zscore'])
>>> x_ = transformer.fit_transform(x)
>>> _x = transformer.inverse_transform(x_)
...
... # Apply multiple transformations on multiple arrays which are passed as list
>>> transformer = Transformations([['a', 'b'], ['a', 'b']],
... config=['minmax', 'zscore'])
>>> x1 = np.arange(50).reshape(25, 2)
>>> x2 = np.arange(50, 100).reshape(25, 2)
>>> x1_transformed = transformer.fit_transform([x1, x2])
>>> _x1 = transformer.inverse_transform(x1_transformed)
We can also do more complicated stuff as following
>>> transformer = Transformations({'x1': ['a', 'b'], 'x2': ['a', 'b']},
... config={'x1': ['minmax', 'zscore'],
... 'x2': [{'method': 'log', 'features': ['a', 'b']},
... {'method': 'robust', 'features': ['a', 'b']}]
... })
>>> x1 = np.arange(20).reshape(10, 2)
>>> x2 = np.arange(100, 120).reshape(10, 2)
>>> x = {'x1': x1, 'x2': x2}
>>> x_transformed = transformer.fit_transform(x)
>>> _x = transformer.inverse_transform(x_transformed)
In above example we apply `minmax` and `zscore` transformations on x1
and `log` and `robust` transformations on x2 array
"""
def __init__(
self,
feature_names: Union[list, dict],
config: Union[str, list, dict] = None,
):
"""
Arguments:
feature_names:
names of features in data
config:
Determines the type of transformation to be applied on data.
It can be one of the following types
- `string` when you want to apply single transformation
>>> config='minmax'
- `dict`: to pass additional arguments to the :py:class:`ai4water.preprocessing.Transformation`
class
>>> config = {"method": 'log', 'treat_negatives': True, 'features': ['features']}
- `list` when we want to apply multiple transformations
>>> ['minmax', 'zscore']
or
>>> [{"method": 'log', 'treat_negatives': True, 'features': ['features']},
>>> {'method': 'sqrt', 'treat_negatives': True}]
"""
self.names = feature_names
self.t_config = config
self.without_fit = False
def _fetch_transformation(self, data):
config = self.t_config
if isinstance(data, list):
if isinstance(config, str):
config = [config for _ in range(len(data))]
elif isinstance(data, dict):
if isinstance(config, str):
config = {k:config for k in data.keys()}
return config
def _check_features(self):
if self.is_numpy_:
assert isinstance(self.names, list), f"""
feature_names are of type {type(self.names)}"""
elif self.is_list_:
for idx, n in enumerate(self.names):
assert isinstance(n, list), f"""
feature_names for {idx} source is {type(n)}. It should be list"""
elif self.is_dict_:
assert isinstance(self.names, dict), f"""
feature_names are of type {type(self.names)}"""
for src_name, n in self.names.items():
assert n.__class__.__name__ in ["ListWrapper", 'list']
return
def transform(self, data:Union[np.ndarray, List, Dict]):
"""Transforms the data according the the `config`.
Arguments:
data:
The data on which to apply transformations. It can be one of following
- a (2d or 3d) numpy array
- a list of numpy arrays
- a dictionary of numpy arrays
Returns:
The transformed data which has same type and dimensions as the input data
"""
if self.t_config is None: # if no transformation then just return the data as it is
return data
orignal_data_type = data.__class__.__name__
assert hasattr(self, 'transformers_'), f"transformer has not been fitted yet"
assert len(self.transformers_) > 0
# first check that data matches config
self._check_features()
# then apply transformation
data = self._transform(data)
# now pack it in original form
assert data.__class__.__name__ == orignal_data_type, f"""
type changed from {orignal_data_type} to {data.__class__.__name__}
"""
#self._assert_same_dim(self, orignal_data, data)
return data
def fit_transform(self, data:Union[np.ndarray, List, Dict]):
"""Transforms the data according the the `config`.
Arguments:
data:
The data on which to apply transformations. It can be one of following
- a (2d or 3d) numpy array
- a list of numpy arrays
- a dictionary of numpy arrays
Returns:
The transformed data which has same type and dimensions as the input data
"""
self.is_numpy_ = False
self.is_list_ = False
self.is_dict_ =False
self.transformers_ = {}
if self.t_config is None: # if no transformation then just return the data as it is
return data
orignal_data_type = data.__class__.__name__
if isinstance(data, np.ndarray):
self.is_numpy_ = True
elif isinstance(data, list):
self.is_list_ = True
elif isinstance(data, dict):
self.is_dict_ = True
else:
raise ValueError(f"invalid data of type {data.__class__.__name__}")
# first check that data matches config
self._check_features()
# then apply transformation
data = self._fit_transform(data)
# now pack it in original form
assert data.__class__.__name__ == orignal_data_type, f"""
type changed from {orignal_data_type} to {data.__class__.__name__}
"""
return data
def _transform_2d(self, data, columns, transformation=None, key="5"):
data = data.copy()
if transformation:
if isinstance(transformation, dict):
config = self.transformers_[key]
transformer = Transformation.from_config(config)
data = transformer.transform(pd.DataFrame(data, columns=columns))
# we want to apply multiple transformations
elif isinstance(transformation, list):
for idx, trans in enumerate(transformation):
if isinstance(trans, str):
config = self.transformers_[f'{key}_{trans}_{idx}']
transformer = Transformation.from_config(config)
data = transformer.transform(pd.DataFrame(data, columns=columns))
elif trans['method'] is not None:
config = self.transformers_[f'{key}_{trans["method"]}_{idx}']
transformer = Transformation.from_config(config)
data = transformer.transform(pd.DataFrame(data, columns=columns))
else:
assert isinstance(transformation, str)
transformer = Transformation.from_config(self.transformers_[key])
data = transformer.transform(pd.DataFrame(data, columns=columns))
data = data.values
return data
def _fit_transform_2d(self, data, columns, transformation=None, key="5"):
"""performs transformation on single data 2D source"""
# it is better to make a copy here because all the operations
# on data happen after this.
data = data.copy()
transformers = {}
if transformation:
if isinstance(transformation, dict):
transformer = Transformation(**transformation)
data = transformer.fit_transform(pd.DataFrame(data, columns=columns))
transformers[key] = transformer.config()
# we want to apply multiple transformations
elif isinstance(transformation, list):
for idx, trans in enumerate(transformation):
if isinstance(trans, str):
transformer = Transformation(method=trans)
data = transformer.fit_transform(pd.DataFrame(data, columns=columns))
transformers[f'{key}_{trans}_{idx}'] = transformer.config()
elif trans['method'] is not None:
transformer = Transformation(**trans)
data = transformer.fit_transform(pd.DataFrame(data, columns=columns))
transformers[f'{key}_{trans["method"]}_{idx}'] = transformer.config()
else:
raise ValueError(f"{trans['method']} is invalid transformation")
else:
assert isinstance(transformation, str)
transformer = Transformation(method=transformation)
data = transformer.fit_transform(pd.DataFrame(data, columns=columns))
transformers[key] = transformer.config()
data = data.values
self.transformers_.update(transformers)
return data
def __transform(self, data, feature_names, transformation=None, key="5"):
"""performs transformation on single data source without fiting on it first.
In case of 3d array, the shape is supposed to be following
(num_examples, time_steps, num_features)
Therefore, each time_step is extracted and transfomred individually
for example with time_steps of 2, two 2d arrays will be extracted and
transformed individually
(num_examples, 0,num_features), (num_examples, 1, num_features)
"""
if data.ndim == 3:
_data = np.full(data.shape, np.nan)
for time_step in range(data.shape[1]):
_data[:, time_step] = self._transform_2d(data[:, time_step],
feature_names,
transformation,
key=f"{key}_{time_step}")
else:
_data = self._transform_2d(data, feature_names, transformation, key=key)
return _data
def __fit_transform(self, data, feature_names, transformation=None, key="5"):
"""performs transformation on single data source
In case of 3d array, the shape is supposed to be following
(num_examples, time_steps, num_features)
Therefore, each time_step is extracted and transfomred individually
for example with time_steps of 2, two 2d arrays will be extracted and
transformed individually
(num_examples, 0,num_features), (num_examples, 1, num_features)
"""
if data.ndim == 3:
_data = np.full(data.shape, np.nan)
for time_step in range(data.shape[1]):
_data[:, time_step] = self._fit_transform_2d(data[:, time_step],
feature_names,
transformation,
key=f"{key}_{time_step}")
else:
_data = self._fit_transform_2d(data, feature_names, transformation, key=key)
return _data
def _transform(self, data, key="5"):
"""performs transformation on every data source in data"""
transformation = self._fetch_transformation(data)
if self.is_numpy_:
_data = self.__transform(data, self.names, transformation, key)
elif self.is_list_:
_data = []
for idx, array in enumerate(data):
_data.append(self.__transform(array,
self.names[idx],
transformation[idx],
key=f"{key}_{idx}")
)
else:
_data = {}
for src_name, array in data.items():
_data[src_name] = self.__transform(array,
self.names[src_name],
transformation[src_name],
f"{key}_{src_name}")
return _data
def _fit_transform(self, data, key="5"):
"""performs transformation on every data source in data"""
transformation = self._fetch_transformation(data)
if self.is_numpy_:
_data = self.__fit_transform(data, self.names, transformation, key)
elif self.is_list_:
_data = []
for idx, array in enumerate(data):
_data.append(self.__fit_transform(array,
self.names[idx],
transformation[idx],
key=f"{key}_{idx}")
)
else:
_data = {}
for src_name, array in data.items():
_data[src_name] = self.__fit_transform(array,
self.names[src_name],
transformation[src_name],
f"{key}_{src_name}")
return _data
def inverse_transform(self, data, postprocess=True):
"""inverse transforms data where data can be dictionary, list or numpy
array.
Arguments:
data:
the data which is to be inverse transformed. The output of
`fit_transform` method.
            postprocess : bool
                whether to carry out the post-processing steps (if any) of the
                inverse transformation or not
Returns:
The original data which was given to `fit_transform` method.
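        Example:
            a minimal round-trip sketch; the ``'minmax'`` config is an illustrative choice
            >>> import numpy as np
            >>> transformer = Transformations(['a', 'b'], config='minmax')
            >>> x_ = transformer.fit_transform(np.arange(50).reshape(25, 2))
            >>> x = transformer.inverse_transform(x_)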
"""
if not hasattr(self, 'transformers_'):
raise ValueError(f"Transformations class has not been fitted yet")
return self._inverse_transform(data, postprocess=postprocess)
def inverse_transform_without_fit(self, data, postprocess=True)->np.ndarray:
data = np.array(data)
if data.ndim == 1:
data = data.reshape(-1, 1)
assert isinstance(self.names, list)
assert data.shape[-1] == len(self.names)
data = pd.DataFrame(data, columns=self.names)
kwargs = {}
if isinstance(self.t_config, str):
kwargs['method'] = self.t_config
elif isinstance(self.t_config, dict):
kwargs = self.t_config
elif isinstance(self.t_config, list):
assert len(self.t_config) == 1
t_config = self.t_config[0]
if isinstance(t_config, str):
kwargs['method'] = t_config
elif isinstance(t_config, dict):
kwargs = t_config
else:
raise ValueError(f"invalid type of t_config {t_config.__class__.__name__}")
else:
raise ValueError(f"invalid type of t_config {self.t_config.__class__.__name__}")
transformer = Transformation(**kwargs)
transformed_data = transformer.inverse_transform(data=data, postprocess=postprocess)
return transformed_data.values
def _inverse_transform(self, data, key="5", postprocess=True):
transformation = self._fetch_transformation(data)
if self.is_numpy_:
data = self.__inverse_transform(data,
self.names,
transformation,
key,
postprocess=postprocess)
elif self.is_list_:
assert isinstance(data, list)
_data = []
for idx, src in enumerate(data):
__data = self.__inverse_transform(src,
self.names[idx],
transformation[idx],
f'{key}_{idx}',
postprocess=postprocess)
_data.append(__data)
data = _data
elif self.is_dict_:
assert isinstance(data, dict)
_data = {}
for src_name, src in data.items():
_data[src_name] = self.__inverse_transform(src,
self.names[src_name],
transformation[src_name],
f'{key}_{src_name}',
postprocess=postprocess)
data = _data
return data
def __inverse_transform(self,
data,
feature_names,
transformation, key="5",
postprocess=True):
"""inverse transforms one data source which may 2d or 3d nd array"""
if data.ndim == 3:
_data = np.full(data.shape, np.nan)
for time_step in range(data.shape[1]):
_data[:, time_step] = self._inverse_transform_2d(
data[:, time_step],
columns=feature_names,
transformation=transformation,
key=f"{key}_{time_step}",
postprocess=postprocess)
else:
_data = self._inverse_transform_2d(data,
feature_names,
key,
transformation,
postprocess=postprocess)
return _data
def _inverse_transform_2d(self,
data,
columns,
key,
transformation,
postprocess=True)->np.ndarray:
"""inverse transforms one 2d array"""
data = pd.DataFrame(data.copy(), columns=columns)
if transformation is not None:
if isinstance(transformation, str):
if key not in self.transformers_:
raise ValueError(f"""
key `{key}` for inverse transformation not found. Available keys are {list(self.transformers_.keys())}""")
transformer = self.transformers_[key]
transformer, shape = transformer, transformer['shape']
original_shape = data.shape
transformer = Transformation.from_config(transformer)
transformed_data = transformer.inverse_transform(data, postprocess=postprocess)
data = transformed_data
elif isinstance(transformation, list):
# idx and trans both in reverse form
for idx, trans in reversed(list(enumerate(transformation))):
if isinstance(trans, str):
transformer = self.transformers_[f'{key}_{trans}_{idx}']
transformer, shape = transformer, transformer['shape']
transformer = Transformation.from_config(transformer)
data = transformer.inverse_transform(data=data, postprocess=postprocess)
elif trans['method'] is not None:
features = trans.get('features', columns)
# if any of the feature in data was transformed
if any([True if f in data else False for f in features]):
                        orig_cols = data.columns # copy the columns of the original df
transformer = self.transformers_[f'{key}_{trans["method"]}_{idx}']
transformer, shape = transformer, transformer['shape']
data, dummy_features = conform_shape(data, shape, features) # get data to transform
transformer = Transformation.from_config(transformer)
transformed_data = transformer.inverse_transform(data=data,
postprocess=postprocess)
data = transformed_data[orig_cols] # remove the dummy data
elif isinstance(transformation, dict):
features = transformation.get('features', columns)
if any([True if f in data else False for f in features]):
orig_cols = data.columns
transformer = self.transformers_[key]
transformer, shape = transformer, transformer['shape']
data, dummy_features = conform_shape(data, shape, features=features)
transformer = Transformation.from_config(transformer)
transformed_data = transformer.inverse_transform(data=data, postprocess=postprocess)
data = transformed_data[orig_cols] # remove the dummy data
if data.__class__.__name__ == "DataFrame":
data = data.values # there is no need to return DataFrame
return data
def config(self)->dict:
"""returns a python dictionary which can be used to construct this class
in fitted form i.e as if the fit_transform method has already been applied.
Returns:
a dictionary from which `Transformations` class can be constructed
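        Example:
            a hedged sketch of serializing a fitted transformer and restoring it
            >>> import numpy as np
            >>> transformer = Transformations(['a', 'b'], config='minmax')
            >>> x_ = transformer.fit_transform(np.arange(50).reshape(25, 2))
            >>> cfg = transformer.config()
            >>> new_transformer = Transformations.from_config(cfg)
            >>> x = new_transformer.inverse_transform(x_)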
"""
return {
'transformers_': jsonize(self.transformers_),
"feature_names": self.names,
"config": self.t_config,
"is_numpy_": self.is_numpy_,
"is_dict_": self.is_dict_,
"is_list_": self.is_list_,
}
@classmethod
def from_config(cls, config:dict)->"Transformations":
"""constructs the Transformations class which may has already been fitted.
"""
config = deepcopy_dict_without_clone(config)
transformer = cls(config.pop('feature_names'), config.pop('config'))
        for attr_name, attr_val in config.items():
            # set the fitted attributes on the instance, not on the class
            setattr(transformer, attr_name, attr_val)
return transformer
def conform_shape(data, shape, features=None):
# if the difference is of only 1 dim, we resolve it
if data.ndim > len(shape):
data = np.squeeze(data, axis=-1)
elif data.ndim < len(shape):
data = np.expand_dims(data, axis=-1)
    assert data.ndim == len(shape), f"""original data had {len(shape)} dimensions while the
    new data has {data.ndim} dimensions"""
    # how many dummy features we have to add to match the shape
dummy_features = shape[-1] - data.shape[-1]
if data.__class__.__name__ in ['DataFrame', 'Series']:
# we know what features must be in data, so put them in data one by one
# if they do not exist in data already
if features:
for f in features:
if f not in data:
data[f] = np.random.random(len(data))
# identify how many features to be added by shape information
elif dummy_features > 0:
dummy_data = pd.DataFrame(np.random.random((len(data), dummy_features)))
data = pd.concat([dummy_data, data], axis=1)
else:
dummy_data = np.random.random((len(data), dummy_features))
data = np.concatenate([dummy_data, data], axis=1)
    return data, dummy_features
# source: ai4water/preprocessing/transformations/_wrapper.py
import math
import warnings
from typing import Union, List, Dict
import scipy.stats as stats
from .utils import _missing_vals
from ai4water.backend import easy_mpl as ep
from .utils import pac_yw, auto_corr, plot_autocorr
from ai4water.utils.visualizations import Plot
from ai4water.utils.utils import create_subplots
from ai4water.preprocessing import Transformation
from ai4water.backend import np, pd, os, plt, sns, mpl
from ai4water.utils.utils import find_tot_plots, get_nrows_ncols
from ai4water.utils.utils import dict_to_file, dateandtime_now, ts_features
ticker = mpl.ticker
# qq plot
# decompose into trend/seasonality and noise
class EDA(Plot):
"""Performns a comprehensive exploratory data analysis on a tabular/structured
data. It is meant to be a one stop shop for eda.
Methods
---------
- heatmap
- box_plot
- plot_missing
- plot_histograms
- plot_index
- plot_data
- plot_pcs
- grouped_scatter
- correlation
- stats
- autocorrelation
- partial_autocorrelation
- probability_plots
- lag_plot
- plot_ecdf
- normality_test
- parallel_coordinates
- show_unique_vals
Example:
>>> from ai4water.datasets import busan_beach
>>> eda = EDA(data=busan_beach())
>>> eda() # to plot all available plots with single line
"""
def __init__(
self,
data: Union[pd.DataFrame, List[pd.DataFrame], Dict, np.ndarray],
in_cols=None,
out_cols=None,
path=None,
dpi=300,
save=True,
show=True,
):
"""
Arguments
---------
data : DataFrame, array, dict, list
either a dataframe, or list of dataframes or a dictionary whose
values are dataframes or a numpy arrays
in_cols : str, list, optional
columns to consider as input features
out_cols : str, optional
columns to consider as output features
path : str, optional
the path where to save the figures. If not given, plots will be
saved in 'data' folder in current working directory.
save : bool, optional
whether to save the plots or not
show : bool, optional
whether to show the plots or not
dpi : int, optional
the resolution with which to save the image
"""
if isinstance(data, np.ndarray):
data = pd.DataFrame(data)
elif isinstance(data, pd.Series):
data = pd.DataFrame(data, columns=[data.name], index=data.index)
self.data = data
self.in_cols = in_cols
self.out_cols = out_cols
self.show = show
super().__init__(path, save=save, dpi=dpi)
@property
def in_cols(self):
return self._in_cols
@in_cols.setter
def in_cols(self, x):
if x is None:
if isinstance(self.data, pd.DataFrame):
x = self.data.columns.to_list()
elif isinstance(self.data, pd.Series):
x = self.data.name
else:
raise ValueError(f"unsupported type of {self.data.__class__.__name__}")
self._in_cols = x
@property
def out_cols(self):
return self._out_cols
@out_cols.setter
def out_cols(self, x):
if x is None:
if isinstance(self.data, pd.DataFrame) or isinstance(self.data, pd.Series):
x = []
else:
raise ValueError
self._out_cols = x
def _save_or_show(self, fname, dpi=None):
return self.save_or_show(where='data', fname=fname, show=self.show, dpi=dpi,
close=False)
def __call__(self,
methods: Union[str, list] = 'all',
cols=None,
):
"""Shortcut to draw maximum possible plots.
Arguments
---------
methods : str, list, optional
the methods to call. If 'all', all available methods will be called.
cols : str, list, optional
columns to use for plotting. If None, all columns will be used.
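        Example
        -------
        a minimal sketch calling only a couple of the available methods:
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda(methods=['plot_missing', 'plot_histograms'])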
"""
all_methods = [
'heatmap', 'plot_missing', 'plot_histograms', 'plot_data',
'plot_index', 'stats', 'box_plot',
'autocorrelation', 'partial_autocorrelation',
'lag_plot', 'plot_ecdf'
]
if isinstance(self.data, pd.DataFrame) and self.data.shape[-1] > 1:
all_methods = all_methods + [ # 'plot_pcs',
'grouped_scatter',
'correlation']
if isinstance(methods, str):
if methods == 'all':
methods = all_methods
else:
methods = [methods]
else:
assert isinstance(methods, list)
assert all([m in all_methods for m in methods])
for m in methods:
if m in ["plot_index", "stats", "plot_pcs"]:
getattr(self, m)()
else:
getattr(self, m)(cols=cols)
return
def heatmap(self,
st=None,
en=None,
cols=None,
figsize: tuple = None,
**kwargs):
"""
Plots data as heatmap which depicts missing values.
Arguments
---------
st : int, str, optional
starting row/index in data to be used for plotting
en : int, str, optional
end row/index in data to be used for plotting
cols : str, list
columns to use to draw heatmap
figsize : tuple, optional
figure size
**kwargs :
Keyword arguments for sns.heatmap
Return
------
None
Example
-------
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> vis = EDA(data)
>>> vis.heatmap()
"""
if sns is None:
raise SeabornNotFound()
return self._call_method('_heatmap_df',
cols=cols,
st=st,
en=en,
figsize=figsize,
**kwargs)
def _heatmap_df(
self,
data: pd.DataFrame,
cols=None,
st=None,
en=None,
spine_color: str = "#EEEEEE",
title=None,
title_fs=16,
fname="",
figsize= None,
**kwargs
):
"""
Plots a heat map of a dataframe. Helpful to show where missing values are
located in a dataframe.
Arguments:
data : pd.DataFrame,
cols : list, columns from data to be used.
st : starting row/index in data to be used for plotting
en : end row/index in data to be used for plotting
spine_color
title: str, title of the plot
title_fs: int, font size of title
fname: str, name of saved file, only valid if save is True.
kwargs: following kwargs are allowed:
xtick_labels_fs, 12
ytick_labels_fs, 20
figsize: tuple
any additional keyword argument will be passed to sns.heatmap
Return:
"""
if cols is None:
cols = data.columns
data = _preprocess_df(data, st, en)
_kwargs = {
"xtick_labels_fs": 12,
"ytick_labels_fs": 20
}
for k in _kwargs.keys():
if k in kwargs:
_kwargs[k] = kwargs.pop(k)
show_time_on_yaxis = False
if isinstance(data.index, pd.DatetimeIndex):
show_time_on_yaxis = True
_, axis = plt.subplots(figsize=figsize or (5 + len(cols)*0.25, 10 + len(cols)*0.1))
# ax2 - Heatmap
sns.heatmap(data[cols].isna(), cbar=False, cmap="binary", ax=axis, **kwargs)
axis.set_yticks(axis.get_yticks()[0::5].astype('int'))
if show_time_on_yaxis:
index = pd.date_range(data.index[0], data.index[-1],
periods=len(axis.get_yticks()))
# formatting y-ticklabels
index = [d.strftime('%Y-%m-%d') for d in index]
axis.set_yticklabels(index, fontsize="18")
else:
axis.set_yticklabels(axis.get_yticks(),
fontsize=_kwargs['ytick_labels_fs'])
axis.set_xticklabels(
axis.get_xticklabels(),
horizontalalignment="center",
fontweight="light",
fontsize=_kwargs['xtick_labels_fs'],
)
axis.tick_params(length=1, colors="#111111")
axis.set_ylabel("Examples", fontsize="24")
for _, spine in axis.spines.items():
spine.set_visible(True)
spine.set_color(spine_color)
if title is not None:
axis.set_title(title, fontsize=title_fs)
self._save_or_show(fname=fname + '_heat_map', dpi=500)
return axis
def plot_missing(self, st=None, en=None, cols=None, **kwargs):
"""
plot data to indicate missingness in data
Arguments
---------
cols : list, str, optional
columns to be used.
st : int, str, optional
starting row/index in data to be used for plotting
en : int, str, optional
end row/index in data to be used for plotting
**kwargs :
Keyword Args such as figsize
Example
-------
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> vis = EDA(data)
>>> vis.plot_missing()
"""
return self._call_method('_plot_missing_df', cols=cols, st=st, en=en, **kwargs)
def _plot_missing_df(self,
data: pd.DataFrame,
cols=None,
st=None,
en=None,
fname: str = '',
**kwargs):
"""
kwargs:
xtick_labels_fs
ytick_labels_fs
figsize
any other keyword argument will be passed to bar_chart()
"""
ax1 = None
if cols is None:
cols = data.columns
data = data[cols]
data = _preprocess_df(data, st, en)
# Identify missing values
mv_total, _, mv_cols, _, mv_cols_ratio = _missing_vals(data).values()
_kwargs = {
"xtick_labels_fs": 12,
"ytick_labels_fs": 20,
"figsize": (5 + len(cols)*0.25, 10 + len(cols)*0.1),
}
for k in _kwargs.keys():
if k in kwargs:
_kwargs[k] = kwargs.pop(k)
        if mv_total < 6:
            print(f"Dataset contains only {mv_total} missing values; skipping the missing-values plot.")
else:
# Create figure and axes
plt.close('all')
fig = plt.figure(figsize=_kwargs['figsize'])
gs = fig.add_gridspec(nrows=1, ncols=1, left=0.1, wspace=0.05)
ax1 = fig.add_subplot(gs[:1, :5])
# ax1 - Barplot
ax1 = ep.bar_chart(labels=list(data.columns),
values=np.round(mv_cols_ratio * 100, 2),
orient='v',
show=False,
ax=ax1)
ax1.set(frame_on=True, xlim=(-0.5, len(mv_cols) - 0.5))
ax1.set_ylim(0, np.max(mv_cols_ratio) * 100)
ax1.grid(linestyle=":", linewidth=1)
ax1.set_yticklabels(ax1.get_yticks(), fontsize="18")
ax1.yaxis.set_major_formatter(ticker.FormatStrFormatter('%.1f'))
ax1.set_ylabel("Missing Percentage", fontsize=_kwargs['ytick_labels_fs'])
ax1.set_xticklabels(
ax1.get_xticklabels(),
horizontalalignment="center",
fontweight="light",
rotation=90,
fontsize=_kwargs['xtick_labels_fs'],
)
ax1.tick_params(axis="y", colors="#111111", length=1)
# annotate missing values on top of the bars
for rect, label in zip(ax1.patches, mv_cols):
height = rect.get_height()
ax1.text(
0.1 + rect.get_x() + rect.get_width() / 2,
height + height*0.02,
label,
ha="center",
va="bottom",
rotation="horizontal",
alpha=0.5,
fontsize="11",
)
self._save_or_show(fname=fname+'_missing_vals', dpi=500)
return ax1
def plot_data(
self,
st=None,
en=None,
freq: str = None,
cols=None,
max_cols_in_plot: int = 10,
ignore_datetime_index=False,
**kwargs
):
"""
Plots the data.
Arguments
---------
st : int, str, optional
starting row/index in data to be used for plotting
en : int, str, optional
end row/index in data to be used for plotting
cols : str, list, optional
columns in data to consider for plotting
max_cols_in_plot : int, optional
Maximum number of columns in one plot. Maximum number of plots
depends upon this value and number of columns
in data.
freq : str, optional
            one of 'weekly', 'monthly' or 'yearly', determines the
            interval of the plot of data. It is valid only for time-series data.
ignore_datetime_index : bool, optional
only valid if dataframe's index is `pd.DateTimeIndex`. In such a case, if
you want to ignore time index on x-axis, set this to True.
**kwargs :
            any arguments for pandas plot method_
.. _method:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.plot.html
Example
-------
>>> from ai4water.datasets import busan_beach
>>> eda = EDA(busan_beach())
>>> eda.plot_data(subplots=True, figsize=(12, 14), sharex=True)
>>> eda.plot_data(freq='monthly', subplots=True, figsize=(12, 14), sharex=True)
"""
return self._call_method("_plot_df",
st=st,
en=en,
cols=cols,
freq=freq,
max_cols_in_plot=max_cols_in_plot,
ignore_datetime_index=ignore_datetime_index,
**kwargs)
def _plot_df(self,
df,
st=None,
en=None,
cols=None,
freq=None,
max_cols_in_plot=10,
prefix='',
leg_kws=None,
label_kws=None,
tick_kws=None,
ignore_datetime_index=False,
**kwargs):
"""Plots each columns of dataframe and saves it if `save` is True.
max_subplots: determines how many sub_plots are to be plotted within
one plot. If dataframe contains columns
greater than max_subplots, a separate plot will be generated for remaining columns.
"""
assert isinstance(df, pd.DataFrame)
plt.close('all')
if leg_kws is None:
leg_kws = {'fontsize': 14}
if label_kws is None:
label_kws = {'fontsize': 14}
if tick_kws is None:
tick_kws = {'axis': "both", 'which': 'major', 'labelsize': 12}
df = _preprocess_df(df, st, en, cols, ignore_datetime_index=ignore_datetime_index)
if df.shape[1] <= max_cols_in_plot:
if freq is None:
kwargs = plot_style(df, **kwargs)
axis = df.plot(**kwargs)
if isinstance(axis, np.ndarray):
for ax in axis:
set_axis_paras(ax, leg_kws, label_kws, tick_kws)
else:
set_axis_paras(axis, leg_kws, label_kws, tick_kws)
self._save_or_show(fname=f"input_{prefix}")
else:
self._plot_df_with_freq(df, freq, **kwargs)
else:
tot_plots = find_tot_plots(df.shape[1], max_cols_in_plot)
for i in range(len(tot_plots) - 1):
st, en = tot_plots[i], tot_plots[i + 1]
sub_df = df.iloc[:, st:en]
if freq is None:
kwargs = plot_style(sub_df, **kwargs)
axis = sub_df.plot(**kwargs)
if kwargs.get('subplots', False):
for ax in axis:
ax.legend(**leg_kws)
ax.set_ylabel(ax.get_ylabel(), **label_kws)
ax.set_xlabel(ax.get_xlabel(), **label_kws)
ax.tick_params(**tick_kws)
else:
axis.legend(**leg_kws)
axis.set_ylabel(axis.get_ylabel(), **label_kws)
axis.set_xlabel(axis.get_xlabel(), **label_kws)
axis.tick_params(**tick_kws)
self._save_or_show(fname=f'input_{prefix}_{st}_{en}')
else:
self._plot_df_with_freq(sub_df, freq,
prefix=f'{prefix}_{st}_{en}',
**kwargs)
return
def _plot_df_with_freq(self,
df: pd.DataFrame,
freq: str,
prefix: str = '',
**kwargs):
"""Plots a dataframe which has data as time-series and its index is pd.DatetimeIndex"""
validate_freq(df, freq)
st_year = df.index[0].year
en_year = df.index[-1].year
assert isinstance(df.index, pd.DatetimeIndex)
for yr in range(st_year, en_year + 1):
_df = df[df.index.year == yr]
if freq == 'yearly':
kwargs = plot_style(_df, **kwargs)
_df.plot(**kwargs)
self._save_or_show(fname=f'input_{prefix}_{str(yr)}')
elif freq == 'monthly':
st_mon = _df.index[0].month
en_mon = _df.index[-1].month
for mon in range(st_mon, en_mon+1):
__df = _df[_df.index.month == mon]
kwargs = plot_style(__df, **kwargs)
__df.plot(**kwargs)
                    self._save_or_show(fname=f'input_{prefix}_{str(yr)}_{str(mon)}')
            elif freq == 'weekly':
                st_week = _df.index[0].isocalendar()[1]
                en_week = _df.index[-1].isocalendar()[1]
                for week in range(st_week, en_week+1):
                    # DatetimeIndex.week is deprecated; use isocalendar().week instead
                    __df = _df[_df.index.isocalendar().week == week]
                    kwargs = plot_style(__df, **kwargs)
                    __df.plot(**kwargs)
                    self._save_or_show(fname=f'input_{prefix}_{str(yr)}_{str(week)}')
return
def parallel_corrdinates(
self,
cols=None,
st=None,
en=100,
color=None,
**kwargs
):
"""
Plots data as parallel coordinates.
Arguments
----------
st :
start of data to be considered
en :
end of data to be considered
cols :
columns from data to be considered.
color :
color or colormap to be used.
**kwargs :
any additional keyword arguments to be passed to easy_mpl.parallel_coordinates_
.. _easy_mpl.parallel_coordinates:
https://easy-mpl.readthedocs.io/en/latest/plots.html#easy_mpl.parallel_coordinates
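        Example
        -------
        a minimal sketch (the method name spelling follows this source file; the
        row range is an illustrative choice):
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.parallel_corrdinates(st=0, en=200)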
"""
return self._call_method(
"_pcorrd_df",
cols=cols,
st=st,
en=en,
color=color,
**kwargs
)
def _pcorrd_df(self,
data,
st=None,
en=100,
cols=None,
color=None,
prefix="",
**kwargs):
data = _preprocess_df(data, st, en, cols)
if data.isna().sum().sum() > 0:
warnings.warn("Dropping rows from data which contain nans.")
data = data.dropna()
if data.shape[0]>1:
categories = None
if self.out_cols and len(self.out_cols)==1:
out_col = self.out_cols[0]
if out_col in data:
categories = data.pop(out_col)
#else:
... # todo categories = self.data[out_col]
ep.parallel_coordinates(data, cmap=color, categories=categories,
show=False, **kwargs)
return self._save_or_show(fname=f"parallel_coord_{prefix}")
else:
warnings.warn("""
Not plotting parallel_coordinates because number of rows are below 2.""")
def normality_test(
self,
method="shapiro",
cols=None,
st=None,
en=None,
orientation="h",
color=None,
figsize: tuple = None,
):
"""plots the statistics of nromality test as bar charts. The statistics
for each feature are calculated either Shapiro-wilke_
test or Anderson-Darling test][] or Kolmogorov-Smirnov test using
scipy.stats.shapiro or scipy.stats.anderson functions respectively.
Arguments
---------
method :
either "shapiro" or "anderson", or "kolmogorov" default is "shapiro"
cols :
columns to use
st : optional
start of data
en : optional
end of data to use
orientation : optional
orientation of bars
color :
color to use
figsize : tuple, optional
figure size (width, height)
Example
-------
>>> from ai4water.eda import EDA
>>> from ai4water.datasets import busan_beach
>>> eda = EDA(data=busan_beach())
>>> eda.normality_test()
        .. _Shapiro-Wilk:
            https://en.wikipedia.org/wiki/Shapiro%E2%80%93Wilk_test
"""
return self._call_method(
"_normality_test_df",
method=method,
cols=cols,
st=st,
en=en,
orientation=orientation,
color=color,
figsize=figsize
)
def _normality_test_df(
self, data, cols=None, st=None, en=None,
method="shapiro",
orientation="h",
prefix="",
color=None,
figsize=None,
):
"""calculates normality test for each column of a DataFrame"""
assert method in ("shapiro", "anderson", "kolmogorov")
data = _preprocess_df(data, st, en, cols)
ranks = []
# calculate stats for each column
for col in data.columns:
x = data[col].dropna().values
if method=="shapiro":
s, _ = stats.shapiro(x)
elif method == "kolmogorov":
s, _ = stats.kstest(x, "norm")
else:
s, _, _ = stats.anderson(x, "norm")
ranks.append(s)
_, ax = plt.subplots(figsize=figsize)
ep.bar_chart(labels=data.columns.tolist(),
values=ranks,
orient=orientation,
show=False,
sort=True,
color=color,
ax=ax
)
return self._save_or_show(fname=f"shapiro_normality_test_{prefix}")
def correlation(
self,
remove_targets=False,
st=None,
en=None,
cols = None,
method: str = "pearson",
split: str = None,
**kwargs
):
"""
Plots correlation between features.
Arguments
---------
remove_targets : bool, optional
whether to remove the output/target column or not
st :
starting row/index in data to be used for plotting
en :
end row/index in data to be used for plotting
cols :
columns to use
method : str, optional
{"pearson", "spearman", "kendall", "covariance"}, by default "pearson"
split : str
To plot only positive correlations, set it to "pos" or to plot
only negative correlations, set it to "neg".
**kwargs : keyword Args
Any additional keyword arguments for seaborn.heatmap
Example
-------
>>> from ai4water.eda import EDA
>>> from ai4water.datasets import busan_beach
>>> vis = EDA(busan_beach())
>>> vis.correlation()
"""
# todo, by default it is using corr_coeff, added other possible correlation methods such as
# rank correlation etc
if cols is None:
if remove_targets:
cols = self.in_cols
else:
cols = self.in_cols + self.out_cols
if isinstance(cols, dict):
cols = None
if sns is None:
raise SeabornNotFound()
return self._call_method("_feature_feature_corr_df",
cols=cols,
st=st,
en=en,
method=method,
split=split,
**kwargs)
def _feature_feature_corr_df(self,
data,
cols=None,
st=None,
en=None,
prefix='',
split=None,
threshold=0,
method='pearson',
**kwargs
):
"""
split : Optional[str], optional
Type of split to be performed {None, "pos", "neg", "high", "low"}, by default
None
method : str, optional
{"pearson", "spearman", "kendall"}, by default "pearson"
kwargs
* vmax: float, default is calculated from the given correlation \
coefficients.
Value between -1 or vmin <= vmax <= 1, limits the range of the cbar.
* vmin: float, default is calculated from the given correlation \
coefficients.
Value between -1 <= vmin <= 1 or vmax, limits the range of the cbar.
"""
plt.close('all')
if cols is None:
cols = data.columns.to_list()
data = _preprocess_df(data, st, en)
if method == "covariance":
corr = np.cov(data[cols].values.transpose())
corr = pd.DataFrame(corr, columns=cols)
else:
corr = data[cols].corr(method=method)
if split == "pos":
corr = corr.where((corr >= threshold) & (corr > 0))
elif split == "neg":
corr = corr.where((corr <= threshold) & (corr < 0))
        # np.bool was removed in newer numpy versions; use the builtin bool instead
        mask = np.zeros_like(corr, dtype=bool)
vmax = np.round(np.nanmax(corr.where(~mask)) - 0.05, 2)
vmin = np.round(np.nanmin(corr.where(~mask)) + 0.05, 2)
figsize = (5 + len(cols)*0.25, 9 + len(cols)*0.1)
if 'figsize' in kwargs:
figsize = kwargs.pop('figsize')
# width x height
_, ax = plt.subplots(figsize=figsize)
_kwargs = dict(
annot= True if len(cols) <= 20 else False,
cmap="BrBG",
vmax=vmax,
vmin=vmin,
linewidths=0.5,
annot_kws={"size": 10},
cbar_kws={"shrink": 0.95, "aspect": 30},
fmt='.2f',
center=0
)
if kwargs:
# pass any keyword argument provided by the user to sns.heatmap
_kwargs.update(kwargs)
ax = sns.heatmap(corr, ax=ax, **_kwargs)
ax.set(frame_on=True)
self._save_or_show(fname=f"{split if split else ''}_feature_corr_{prefix}")
return ax
def plot_pcs(self, num_pcs=None, st=None, en=None, save_as_csv=False,
figsize=(12, 8), **kwargs):
"""Plots principle components.
Arguments
---------
        num_pcs :
            number of principal components to plot
        st : starting row/index in data to be used for plotting
        en : end row/index in data to be used for plotting
        save_as_csv :
            whether to also save the principal components as a csv file or not
        figsize :
            figure size
        kwargs : will go to sns.pairplot.
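        Example
        -------
        a hedged sketch; the number of components is an illustrative choice
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.plot_pcs(num_pcs=3)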
"""
if isinstance(self.data, list):
for idx, data in enumerate(self.data):
self._plot_pcs(data[self.in_cols],
num_pcs,
st=st, en=en,
prefix=str(idx), save_as_csv=save_as_csv,
hue=self.out_cols[idx], figsize=figsize, **kwargs)
elif isinstance(self.data, dict):
for data_name, data in self.data.items():
self._plot_pcs(data[self.in_cols], num_pcs,
st=st, en=en,
prefix=data_name, save_as_csv=save_as_csv,
hue=self.out_cols,
figsize=figsize, **kwargs)
else:
self._plot_pcs(self.data[self.in_cols],
num_pcs,
st=st, en=en,
save_as_csv=save_as_csv, hue=self.out_cols,
figsize=figsize, **kwargs)
return
def _plot_pcs(self,
data,
num_pcs,
st=None,
en=None,
prefix='',
save_as_csv=False,
hue=None,
figsize=(12, 8), **kwargs):
data = _preprocess_df(data, st, en)
if num_pcs is None:
_num_pcs = int(data.shape[1]/2)
if _num_pcs > 5 and num_pcs is None:
num_pcs = 5
else:
num_pcs = _num_pcs
if num_pcs < 1:
print(f'{num_pcs} pcs can not be plotted because data has shape {data.shape}')
return
# df_pca = data[self.in_cols]
# pca = PCA(n_components=num_pcs).fit(df_pca)
# df_pca = pd.DataFrame(pca.transform(df_pca))
transformer = Transformation(data=data, method='pca', n_components=num_pcs,
replace_nans=True)
df_pca = transformer.transform()
pcs = ['pc' + str(i + 1) for i in range(num_pcs)]
df_pca.columns = pcs
if hue is not None and len(self.out_cols) > 0:
if isinstance(hue, list):
if len(hue) == 1:
hue = hue[0]
else:
hue = None
if hue in data:
df_pca[hue] = data[hue]
# output columns contains nans, so don't use it as hue.
if df_pca[hue].isna().sum() > 0:
hue = None
if isinstance(hue, list) and len(hue) == 0:
hue = None
if save_as_csv:
            # build the path with os.path.join so that it is platform independent
            df_pca.to_csv(os.path.join(self.path, "data", f"first_{num_pcs}_pcs_{prefix}.csv"))
plt.close('all')
plt.figure(figsize=figsize)
sns.pairplot(data=df_pca, vars=pcs, hue=hue, **kwargs)
self._save_or_show(fname=f"first_{num_pcs}_pcs_{prefix}")
return
def grouped_scatter(
self,
cols=None,
st=None,
en=None,
max_subplots: int = 8,
**kwargs
):
"""Makes scatter plot for each of feature in data.
Arguments
----------
st :
starting row/index in data to be used for plotting
en :
end row/index in data to be used for plotting
        cols :
            columns to use
        max_subplots : int, optional
            it can be set to a large number to show all the scatter plots on one
            axis.
        kwargs :
            keyword arguments for sns.pairplot
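        Example
        -------
        a minimal sketch; ``max_subplots`` is kept small just for illustration
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.grouped_scatter(max_subplots=4)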
"""
if sns is None:
raise SeabornNotFound()
self._call_method('_grouped_scatter_plot_df',
max_subplots=max_subplots,
cols=cols,
st=st,
en=en,
**kwargs)
return
def _grouped_scatter_plot_df(
self,
data: pd.DataFrame,
max_subplots: int = 10,
st=None,
en=None,
cols = None,
prefix='',
**kwargs):
"""
max_subplots: int, it can be set to large number to show all the scatter
plots on one axis.
"""
data = data.copy()
data = _preprocess_df(data, st, en, cols=cols)
if data.shape[1] <= max_subplots:
self._grouped_scatter_plot(data, name=f'grouped_scatter_{prefix}',
**kwargs)
else:
tot_plots = find_tot_plots(data.shape[1], max_subplots)
for i in range(len(tot_plots) - 1):
st, en = tot_plots[i], tot_plots[i + 1]
sub_df = data.iloc[:, st:en]
self._grouped_scatter_plot(sub_df,
name=f'grouped_scatter_{prefix}_{st}_{en}',
**kwargs)
return
def _grouped_scatter_plot(self, df, name='grouped_scatter', **kwargs):
plt.close('all')
sns.set()
        # seaborn renamed the `size` argument of pairplot to `height`
        sns.pairplot(df, height=2.5, **kwargs)
self._save_or_show(fname=name)
return
def plot_histograms(
self,
st=None,
en=None,
cols=None,
max_subplots: int = 40,
figsize: tuple = (20, 14),
**kwargs
):
"""Plots distribution of data as histogram_.
Arguments
---------
st :
starting index of data to use
en :
end index of data to use
cols :
columns to use
max_subplots : int, optional
maximum number of subplots in one figure
figsize :
figure size
        **kwargs : any keyword argument for pandas.DataFrame.hist function
.. _histogram:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.hist.html
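        Example
        -------
        a minimal sketch using the bundled dataset:
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.plot_histograms()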
"""
return self._call_method("_plot_hist_df", st=st, en=en, cols=cols,
figsize=figsize,
max_subplots=max_subplots,
**kwargs)
def _plot_hist_df(self,
data: pd.DataFrame,
cols=None,
st=None,
en=None,
prefix='',
bins=100,
figsize=(20, 14),
max_subplots: int = 40,
**kwargs
):
"""Plots histogram of one dataframe"""
data = _preprocess_df(data, st, en, cols)
if data.shape[1] <= max_subplots:
return self._hist_df(data, bins, figsize, prefix, **kwargs)
tot_plots = find_tot_plots(data.shape[1], max_subplots)
for i in range(len(tot_plots) - 1):
st, en = tot_plots[i], tot_plots[i + 1]
self._hist_df(data.iloc[:, st:en],
bins, figsize,
prefix=f'hist_{prefix}_{i}_{st}_{en}',
**kwargs)
return
def _hist_df(self, data, bins, figsize, prefix, **kwargs):
axis = data.hist(bins=bins, figsize=figsize, **kwargs)
self._save_or_show(fname=f"hist_{prefix}")
return axis
def plot_index(self, st=None, en=None, **kwargs):
"""plots the datetime index of dataframe
"""
return self._call_method("_plot_index", st=st, en=en, **kwargs)
def _plot_index(self,
index,
st=None,
en=None,
fname="index",
figsize=(10, 5),
label_fs=18,
title_fs=20,
leg_fs=14,
leg_ms=4,
color='r',
):
"""
        Plots the index of a dataframe.
        index: can be a pandas dataframe or the index itself. If a dataframe is
        given, its index will be used for plotting.
"""
plt.close('all')
if isinstance(index, pd.DataFrame):
index = index.index
idx = pd.DataFrame(np.ones(len(index)), index=index,
columns=['Observations'])
axis = idx.plot(linestyle='', marker='.', color=color, figsize=figsize)
axis.legend(fontsize=leg_fs, markerscale=leg_ms)
axis.set_xlabel(axis.get_xlabel(), fontdict={'fontsize': label_fs})
axis.set_title("Temporal distribution of Observations", fontsize=title_fs)
axis.get_yaxis().set_visible(False)
self._save_or_show(fname=fname)
return axis
def stats(self,
precision=3,
inputs=True,
outputs=True,
st=None,
en=None,
out_fmt="csv",
):
"""Finds the stats of inputs and outputs and puts them in a json file.
inputs: bool
fpath: str, path like
out_fmt: str, in which format to save. csv or json"""
cols = []
fname = "data_description_"
if inputs:
cols += self.in_cols
fname += "inputs_"
if outputs:
cols += self.out_cols
fname += "outputs_"
fname += str(dateandtime_now())
def save_stats(_description, _fpath):
if self.save:
if out_fmt == "csv":
pd.DataFrame.from_dict(_description).to_csv(_fpath + ".csv")
else:
dict_to_file(others=_description, path=_fpath + ".json")
description = {}
if isinstance(self.data, pd.DataFrame):
description = {}
for col in cols:
if col in self.data:
description[col] = ts_features(
_preprocess_df(self.data[col],
st, en),
precision=precision, name=col)
save_stats(description, self.path)
elif isinstance(self.data, list):
description = {}
for idx, data in enumerate(self.data):
_description = {}
if isinstance(data, pd.DataFrame):
for col in cols:
if col in data:
_description[col] = ts_features(
_preprocess_df(data[col], st, en),
precision=precision, name=col)
description['data' + str(idx)] = _description
_fpath = os.path.join(self.path, fname + f'_{idx}')
save_stats(_description, _fpath)
elif isinstance(self.data, dict):
for data_name, data in self.data.items():
_description = {}
if isinstance(data, pd.DataFrame):
for col in data.columns:
_description[col] = ts_features(
_preprocess_df(data[col], st, en),
precision=precision, name=col)
description[f'data_{data_name}'] = _description
_fpath = os.path.join(self.path, fname + f'_{data_name}')
save_stats(_description, _fpath)
else:
print(f"description can not be found for data type of {self.data.__class__.__name__}")
return description
def box_plot(
self,
st=None,
en=None,
cols: Union[list, str] = None,
violen=False,
normalize=True,
figsize=(12, 8),
max_features=8,
show_datapoints=False,
freq=None,
**kwargs
):
"""
        Plots a box-whisker or violin plot of the data.
Arguments
---------
st : optional
starting row/index in data to be used for plotting
en : optional
end row/index in data to be used for plotting
cols : list,
the name of columns from data to be plotted.
normalize :
If True, then each feature/column is rescaled between 0 and 1.
figsize :
figure size
freq : str,
one of 'weekly', 'monthly', 'yearly'. If given, box plot will be
plotted for these intervals.
max_features : int,
maximum number of features to appear in one plot.
violen : bool,
            if True, then a violin plot will be drawn, otherwise a box-whisker plot
show_datapoints : bool
if True, sns.swarmplot() will be plotted. Will be time
consuming for bigger data.
**kwargs :
            any args for seaborn.boxplot/seaborn.violinplot or seaborn.swarmplot.
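        Example
        -------
        a minimal sketch; the ``violen`` value is an illustrative choice
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.box_plot()
        >>> eda.box_plot(violen=True)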
"""
if sns is None:
raise SeabornNotFound()
return self._call_method("_box_plot",
st=st, en=en, cols=cols,
normalize=normalize,
max_features=max_features,
figsize=figsize,
show_datapoints=show_datapoints,
freq=freq,
#prefix=fname,
violen=violen,
**kwargs)
def _box_plot(self,
data,
cols,
st=None,
en=None,
normalize=True,
figsize=(12, 8),
max_features=8,
show_datapoints=False,
freq=None,
violen=False,
prefix='',
**kwargs):
data = _preprocess_df(data, st, en, cols)
axis = None
if data.shape[1] <= max_features:
axis = self._box_plot_df(data,
normalize=normalize,
show_datapoints=show_datapoints,
violen=violen,
freq=freq,
prefix=f"{'violen' if violen else 'box'}_{prefix}",
figsize=figsize,
**kwargs
)
else:
tot_plots = find_tot_plots(data.shape[1], max_features)
for i in range(len(tot_plots) - 1):
_st, _en = tot_plots[i], tot_plots[i + 1]
self._box_plot_df(data.iloc[:, _st:_en],
normalize=normalize,
show_datapoints=show_datapoints,
violen=violen,
figsize=figsize,
freq=freq,
prefix=f"{'violen' if violen else 'box'}_{prefix}_{_st}_{_en}",
**kwargs)
return axis
def _box_plot_df(self,
data,
normalize=True,
show_datapoints=False,
violen=False,
figsize=(12, 8),
prefix="box_plot",
freq=None,
**kwargs
):
data = data.copy()
# if data contains duplicated columns, transformation will not work
data = data.loc[:, ~data.columns.duplicated()]
if normalize:
transformer = Transformation()
data = transformer.fit_transform(data)
if freq is not None:
return self._box_plot_with_freq(data,
freq=freq,
show_datapoints=show_datapoints,
figsize=figsize,
violen=violen,
prefix=prefix,
**kwargs
)
return self.__box_plot_df(data=data,
name=prefix,
violen=violen,
figsize=figsize,
show_datapoints=show_datapoints,
**kwargs)
def __box_plot_df(self,
data,
name,
violen=False,
figsize=(12, 8),
show_datapoints=False,
**kwargs):
plt.close('all')
plt.figure(figsize=figsize)
if violen:
axis = sns.violinplot(data=data, **kwargs)
else:
axis = sns.boxplot(data=data, **kwargs)
axis.set_xticklabels(list(data.columns), fontdict={'rotation': 70})
if show_datapoints:
sns.swarmplot(data=data)
self._save_or_show(fname=name)
return axis
def _box_plot_with_freq(self,
data,
freq,
violen=False,
show_datapoints=False,
figsize=(12, 8),
name='bw',
prefix='',
**kwargs
):
validate_freq(data, freq)
st_year = data.index[0].year
en_year = data.index[-1].year
for yr in range(st_year, en_year + 1):
_df = data[data.index.year == yr]
            if freq == 'yearly':
                # _box_plot_df expects the plot name through its `prefix` argument
                self._box_plot_df(_df,
                                  prefix=f'{name}_input_{prefix}_{str(yr)}',
                                  figsize=figsize,
                                  violen=violen,
                                  show_datapoints=show_datapoints,
                                  **kwargs)
            elif freq == 'monthly':
                st_mon = _df.index[0].month
                en_mon = _df.index[-1].month
                for mon in range(st_mon, en_mon+1):
                    __df = _df[_df.index.month == mon]
                    self._box_plot_df(__df,
                                      prefix=f'{prefix}_{str(yr)}_{str(mon)}',
                                      figsize=figsize,
                                      violen=violen,
                                      show_datapoints=show_datapoints,
                                      **kwargs)
            elif freq == 'weekly':
                st_week = _df.index[0].isocalendar()[1]
                en_week = _df.index[-1].isocalendar()[1]
                for week in range(st_week, en_week+1):
                    # DatetimeIndex.week is deprecated; use isocalendar().week instead
                    __df = _df[_df.index.isocalendar().week == week]
                    self._box_plot_df(__df,
                                      prefix=f'{prefix}_{str(yr)}_{str(week)}',
                                      violen=violen,
                                      figsize=figsize,
                                      show_datapoints=show_datapoints,
                                      **kwargs)
return
def autocorrelation(
self,
n_lags: int = 10,
cols: Union[list, str] = None,
figsize: tuple = None,
):
"""autocorrelation of individual features of data
Arguments
---------
n_lags : int, optional
number of lag steps to consider
cols : str, list, optional
columns to use. If not defined then all the columns are used
figsize : tuple, optional
figure size
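        Example
        -------
        a minimal sketch; the number of lags is an illustrative choice
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.autocorrelation(n_lags=30)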
"""
return self._call_method("_autocorr_df", partial=False,
n_lags=n_lags,
cols=cols,
figsize=figsize
)
def partial_autocorrelation(
self,
n_lags: int = 10,
cols: Union[list, str] = None,
):
"""Partial autocorrelation of individual features of data
Arguments
---------
n_lags : int, optional
number of lag steps to consider
cols : str, list, optional
columns to use. If not defined then all the columns are used
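        Example
        -------
        a minimal sketch; the number of lags is an illustrative choice
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.partial_autocorrelation(n_lags=30)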
"""
return self._call_method("_autocorr_df", partial=True, n_lags=n_lags,
cols=cols)
def _autocorr_df(
self,
data: pd.DataFrame,
n_lags: int,
partial: bool = False,
cols=None,
figsize=None,
fname='',
):
"""autocorrelation on a dataframe."""
prefix = 'Partial' if partial else ''
if cols is not None:
if isinstance(cols, str):
cols = [cols]
assert isinstance(cols, list)
data = data[cols]
non_nan = data.isna().sum()
num_subplots = max(math.ceil(len(non_nan[non_nan == 0])/2)*2, 1)
fig, axis = create_subplots(naxes=num_subplots, figsize=figsize,
sharex=True, sharey=True
)
axis = np.array(axis) # if it is a single axis then axis.flat will not work
nrows = axis.shape[0]
for col, ax in zip(data.columns, axis.flat):
x = data[col].values
if np.isnan(x).sum() == 0:
if partial:
_ac = pac_yw(x, n_lags)
else:
_ac = auto_corr(x, n_lags)
plot_autocorr(_ac, axis=ax, legend=col, show=False,
legend_fs=nrows*1.5)
else:
print(f"cannot plot autocorrelation for {col} feature")
plt.suptitle(f"{prefix} Autocorrelation",
fontsize=nrows*2)
fname = f"{prefix} autocorr_{fname}"
self._save_or_show(fname=fname)
return axis
def _call_method(self, method_name, *args, **kwargs):
"""calls the method with the data and args + kwargs"""
if isinstance(self.data, list):
for idx, data in enumerate(self.data):
getattr(self, method_name)(data, fname=str(idx), *args, **kwargs)
elif isinstance(self.data, dict):
for data_name, data in self.data.items():
getattr(self, method_name)(data, fname=data_name, *args, **kwargs)
else:
return getattr(self, method_name)(self.data, *args, **kwargs)
def probability_plots(
self,
cols: Union[str, list] = None
):
"""
        draws a probability plot using scipy.stats.probplot_. See `scipy distributions`_
.. _scipy.stats.probplot:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.probplot.html
.. _scipy distributions:
https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions
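        Example
        -------
        a minimal sketch; note that a figure is drawn for every column
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.probability_plots()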
"""
return self._call_method("_plot_prob_df", cols=cols)
def _plot_prob_df(
self,
data: pd.DataFrame,
cols: Union[str, list] = None,
fname=None,
):
"""probability plots for one dataframe"""
assert isinstance(data, pd.DataFrame)
if cols is not None:
if isinstance(cols, str):
cols = [cols]
else:
cols = data.columns.to_list()
assert isinstance(cols, list)
data = data[cols]
for col in data.columns:
series = data[col]
self._prob_plot_series(series, fname=fname)
return
def _prob_plot_series(
self,
data: Union[pd.DataFrame, pd.Series],
fname: str = None
):
"""probability plots for one series."""
        if not isinstance(data, pd.Series):
            assert isinstance(data, pd.DataFrame) and data.shape[1] == 1
            # squeeze the single-column DataFrame into a Series
            data = data.iloc[:, 0]
if data.isna().sum() > 0:
print(f"removing nan values from {data.name}")
data = data.dropna()
array = data.values
cont_distros = {
"norm": stats.norm(),
"uniform": stats.uniform(),
"semicircular": stats.semicircular(),
"cauchy": stats.cauchy(),
"expon": stats.expon(),
"rayleight": stats.rayleigh(),
"moyal": stats.moyal(),
"arcsine": stats.arcsine(),
"anglit": stats.anglit(),
"gumbel_l": stats.gumbel_l(),
"gilbrat": stats.gilbrat(),
"levy": stats.levy(),
"laplace": stats.laplace(),
"bradford": stats.bradford(0.5),
"kappa3": stats.kappa3(1),
"pareto": stats.pareto(2.62)
}
fig, axis = plt.subplots(4, 4, figsize=(10, 10))
for (idx, rv), ax in zip(enumerate(cont_distros.values()), axis.flat):
if isinstance(rv, str):
_name = rv
else:
_name = rv.dist.name
(osm, osr), (slope, intercept, r) = stats.probplot(array, dist=rv,
plot=ax)
h = ax.plot(osm, osr, label="bo")
if idx % 4 == 0:
ax.set_ylabel("Ordered Values", fontsize=12)
else:
ax.set_ylabel("")
if idx > 11:
ax.set_xlabel("Theoretical Quantiles", fontsize=12)
else:
ax.set_xlabel("")
ax.set_title("")
text = f"{_name}"
ax.legend(h, [text], loc="best", fontsize=12,
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
plt.suptitle(data.name, fontsize=18)
self._save_or_show(f"probplot_{data.name}_{fname}")
return fig
def _lag_plot_series(self, series: pd.Series, n_lags: int, figsize=None,
**kwargs):
if hasattr(n_lags, '__len__'):
lags = np.array(n_lags)
n_lags = len(lags)
else:
lags = range(1, n_lags+1)
figsize = figsize or (5, 5 + n_lags*0.2)
n_rows, n_cols = 1, 1
if n_lags > 1:
n_rows = (math.ceil(n_lags/2) * 2) // 2
n_cols = 2
fig, axis = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=figsize,
sharex="all")
if n_lags == 1:
axis = np.array([axis])
for n, ax in zip(lags, axis.flat):
lag_plot(series, n, ax, **kwargs)
plt.suptitle(series.name)
self._save_or_show(fname=f"lagplot_{series.name}")
return axis
def _lag_plot_df(self, data: pd.DataFrame, n_lags: int, cols=None, **kwargs):
data = _preprocess_df(data, cols=cols)
axes = []
for col in data.columns:
axes.append(self._lag_plot_series(data[col], n_lags, **kwargs))
return axes
def lag_plot(
self,
n_lags: Union[int, list] = 1,
cols=None,
figsize=None,
**kwargs):
"""lag plot between an array and its lags
Arguments
---------
n_lags :
lag step against which to plot the data, it can be integer
or a list of integers
cols :
columns to use
figsize :
figure size
kwargs : any keyword arguments for axis.scatter
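        Example
        -------
        a minimal sketch; the lag values are illustrative choices
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.lag_plot(n_lags=[1, 2, 3])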
"""
return self._call_method("_lag_plot_df", n_lags=n_lags, cols=cols,
figsize=figsize, **kwargs)
def plot_ecdf(
self,
cols=None,
figsize=None,
**kwargs
):
"""plots empirical cummulative distribution function
Arguments
---------
cols :
columns to use
figsize :
kwargs :
any keyword argument for axis.plot
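        Example
        -------
        a minimal sketch using the bundled dataset:
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.plot_ecdf()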
"""
return self._call_method("_plot_ecdf_df", cols=cols, figsize=figsize,
**kwargs)
def _plot_ecdf_df(self, data: pd.DataFrame, cols=None, figsize=None,
fname=None, **kwargs):
data = _preprocess_df(data, cols=cols)
ncols = data.shape[1]
n_rows, n_cols = 1, 1
if ncols > 1:
n_rows = (math.ceil(ncols / 2) * 2) // 2
n_cols = 2
figsize = figsize or (6, 5 + ncols * 0.2)
fig, axis = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=figsize)
if ncols == 1:
axis = np.array([axis])
for col, ax in zip(data.columns, axis.flat):
plot_ecdf(data[col], ax=ax, **kwargs)
self._save_or_show(fname=f"ecdf_{fname}")
return axis
def show_unique_vals(
self,
threshold: int = 10,
st = None,
en = None,
cols = None,
max_subplots: int = 9,
figsize: tuple = None,
**kwargs
):
"""
        Shows the percentage of unique/categorical values in the data. Only those
        columns are used whose number of unique values is below the ``threshold``.
        Arguments
        ----------
        threshold : int, optional
            columns with more unique values than this are ignored
        st : int, str, optional
            starting row/index in data to be used
        en : int, str, optional
            end row/index in data to be used
        cols : str, list, optional
            columns to use
        max_subplots : int, optional
            maximum number of pie charts in one figure
        figsize : tuple, optional
            figure size
**kwargs :
Any keyword arguments for `easy_mpl.pie <https://easy-mpl.readthedocs.io/en/latest/plots.html#easy_mpl.pie>`_
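        Example
        -------
        a minimal sketch; most useful when the data contains categorical columns
        >>> from ai4water.eda import EDA
        >>> from ai4water.datasets import busan_beach
        >>> eda = EDA(data=busan_beach(), save=False)
        >>> eda.show_unique_vals(threshold=10)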
"""
return self._call_method('_pie_df',
threshold=threshold,
st=st, en=en, cols=cols,
max_subplots=max_subplots,
figsize=figsize,
**kwargs)
def _pie_df(self, data,
threshold, st, en, cols,
max_subplots=9,
fname="",
**kwargs):
data = _preprocess_df(data, st, en, cols)
if data.shape[1] < max_subplots:
self._pie(data,
threshold = threshold,
fname=fname,
**kwargs)
else:
tot_plots = find_tot_plots(data.shape[1], max_subplots)
for i in range(len(tot_plots) - 1):
_st, _en = tot_plots[i], tot_plots[i + 1]
self._pie(data.iloc[:, _st:_en], threshold=threshold, fname=fname,
**kwargs)
return
def _pie(self, data, fname="", figsize=None, threshold=10, **kwargs):
fractions = {}
for col in data.columns:
fracts = data[col].value_counts(normalize=True).values
if len(fracts) <= threshold:
fractions[col] = fracts
else:
print(f"Ignoring {col} as it contains {len(fracts)} unique values")
if len(fractions) > 0:
nrows, ncols = get_nrows_ncols(3, len(fractions))
_, axis = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize or (12, 12))
if isinstance(axis, plt.Axes):
axis = np.array([axis])
for col, ax in zip(fractions.keys(), axis.flat):
ep.pie(fractions[col], ax=ax, show=False, **kwargs)
self._save_or_show(fname=f"pie_{fname}")
return
def plot_ecdf(x: Union[pd.Series, np.ndarray], ax=None, **kwargs):
if ax is None:
ax = plt.gca()
if isinstance(x, pd.Series):
_name = x.name
x = x.values
else:
assert isinstance(x, np.ndarray)
_name = "ecdf"
x, y = ecdf(x)
ax.plot(x, y, label=_name, **kwargs)
ax.legend()
return ax
def ecdf(x: np.ndarray):
# https://stackoverflow.com/a/37660583/5982232
xs = np.sort(x)
ys = np.arange(1, len(xs)+1)/float(len(xs))
return xs, ys
def lag_plot(series: pd.Series, lag: int, ax, **kwargs):
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel(f"y(t + {lag})")
ax.scatter(y1, y2, **kwargs)
return ax
def set_axis_paras(axis, leg_kws, label_kws, tick_kws):
axis.legend(**leg_kws)
axis.set_ylabel(axis.get_ylabel(), **label_kws)
axis.set_xlabel(axis.get_xlabel(), **label_kws)
axis.tick_params(**tick_kws)
return
def plot_style(df: pd.DataFrame, **kwargs):
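"""if the dataframe contains NaNs and no ``style`` is given in kwargs, sets a
point marker ('.') style for every column and returns the (possibly updated) kwargs."""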
if 'style' not in kwargs and df.isna().sum().sum() > 0:
kwargs['style'] = ['.' for _ in range(df.shape[1])]
return kwargs
def validate_freq(df, freq):
assert isinstance(df.index, pd.DatetimeIndex), """
index of dataframe must be pandas DatetimeIndex"""
assert freq in ["weekly", "monthly","yearly"], f"""
freq must be one of {'weekly', 'monthly', 'yearly'} but it is {freq}"""
return
def _preprocess_df(df:pd.DataFrame, st=None, en=None, cols=None,
ignore_datetime_index=False):
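"""slices the dataframe to the given columns and to the period (integer
positions or index labels) defined by ``st`` and ``en``; if
``ignore_datetime_index`` is True, the index is reset."""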
if cols is not None:
if isinstance(cols, str):
cols = [cols]
df = df[cols]
if st is None:
st = df.index[0]
if en is None:
en = df.index[-1]
if isinstance(st, int):
df = df.iloc[st:en]
else:
df = df.loc[st:en]
if ignore_datetime_index:
df = df.reset_index(drop=True)
return df
class SeabornNotFound(Exception):
def __str__(self):
return """
You must have seaborn library installed.
Please install seaborn using 'pip install seaborn'
""" | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/eda/_main.py | _main.py |
from typing import Any, Dict
from scipy import linalg
from ai4water.backend import np, pd, plt
def auto_corr(x, nlags, demean=True):
"""
autocorrelation function (acf) computed like statsmodels. Note that when
``demean`` is True, ``x`` is demeaned in place.
https://stackoverflow.com/a/51168178
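A minimal illustrative example (assuming ``x`` is a 1D float array):
>>> import numpy as np
>>> x = np.sin(np.arange(100, dtype=float))
>>> acf = auto_corr(x, nlags=5)
>>> len(acf)  # correlations for lag 0 up to lag 5
6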
"""
var = np.var(x)
if demean:
x -= np.mean(x)
corr = np.full(nlags+1, np.nan, np.float64)
corr[0] = 1.
for lag in range(1, nlags+1):
corr[lag] = np.sum(x[lag:]*x[:-lag])/len(x)/var
return corr
def pac_yw(x, nlags):
"""partial autocorrelation according to ywunbiased method"""
pac = np.full(nlags+1, fill_value=np.nan, dtype=np.float64)
pac[0] = 1.
for lag in range(1, nlags+1):
pac[lag] = ar_yw(x, lag)[-1]
return pac
def ar_yw(x, order=1, adj_needed=True, demean=True):
"""Performs autoregressor using Yule-Walker method.
Returns:
rho : np array
coefficients of AR
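A minimal illustrative example (assuming a 1D float array as input):
>>> import numpy as np
>>> x = np.sin(np.arange(200, dtype=float))
>>> rho = ar_yw(x, order=2)
>>> rho.shape
(2,)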
"""
x = np.array(x, dtype=np.float64)
if demean:
x -= x.mean()
n = len(x)
r = np.zeros(order+1, np.float64)
r[0] = (x ** 2).sum() / n
for k in range(1, order+1):
r[k] = (x[0:-k] * x[k:]).sum() / (n - k * adj_needed)
R = linalg.toeplitz(r[:-1])
rho = np.linalg.solve(R, r[1:])
return rho
def plot_autocorr(
x,
axis=None,
plot_marker=True,
show=True,
legend=None,
title=None,
xlabel=None,
vlines_colors=None,
hline_color=None,
marker_color=None,
legend_fs=None
):
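"""plots the given (auto)correlation values as markers with vertical lines at
each lag and a horizontal reference line, returning the matplotlib axes."""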
if not axis:
_, axis = plt.subplots()
if plot_marker:
axis.plot(x, 'o', color=marker_color, label=legend)
if legend:
axis.legend(fontsize=legend_fs)
axis.vlines(range(len(x)), [0], x, colors=vlines_colors)
axis.axhline(color=hline_color)
if title:
axis.set_title(title)
if xlabel:
axis.set_xlabel("Lags")
if show:
plt.show()
return axis
def ccovf_np(x, y, unbiased=True, demean=True):
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf_np(x, y, unbiased=True):
"""cross correlation between two time series
# https://stackoverflow.com/a/24617594
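A minimal illustrative example (assuming two 1D arrays of equal length):
>>> import numpy as np
>>> a, b = np.random.random(100), np.random.random(100)
>>> cc = ccf_np(a, b)
>>> cc.shape
(100,)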
"""
cvf = ccovf_np(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
def _missing_vals(data: pd.DataFrame) -> Dict[str, Any]:
"""
Modified after https://github.com/akanz1/klib/blob/main/klib/utils.py#L197
Gives metrics of missing values in the dataset.
Parameters
----------
data : pd.DataFrame
2D dataset that can be coerced into Pandas DataFrame
Returns
-------
Dict[str, float]
mv_total: float, number of missing values in the entire dataset
mv_rows: float, number of missing values in each row
mv_cols: float, number of missing values in each column
mv_rows_ratio: float, ratio of missing values for each row
mv_cols_ratio: float, ratio of missing values for each column
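Examples
--------
A small illustrative example:
>>> import numpy as np, pandas as pd
>>> df = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, np.nan, 6.0]})
>>> int(_missing_vals(df)["mv_total"])
3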
"""
data = pd.DataFrame(data).copy()
mv_rows = data.isna().sum(axis=1)
mv_cols = data.isna().sum(axis=0)
mv_total = data.isna().sum().sum()
mv_rows_ratio = mv_rows / data.shape[1]
mv_cols_ratio = mv_cols / data.shape[0]
return {
"mv_total": mv_total,
"mv_rows": mv_rows,
"mv_cols": mv_cols,
"mv_rows_ratio": mv_rows_ratio,
"mv_cols_ratio": mv_cols_ratio,
} | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/eda/utils.py | utils.py |
__all__ = ["Quadica"]
from typing import Union, List, Tuple
from ai4water.backend import pd, os, np
from ._datasets import Datasets
from .utils import check_attributes, sanity_check, check_st_en
class Quadica(Datasets):
"""
This is a dataset of water quality parameters of Germany from 1386 stations
from 1950 to 2018 following the work of Ebeling_ et al., 2022. The time-step
is monthly and annual but the monthly timeseries data is not continuous.
.. _Ebeling:
https://doi.org/10.5194/essd-2022-6
"""
url = {
"quadica.zip":
"https://www.hydroshare.org/resource/26e8238f0be14fa1a49641cd8a455e29/data/contents/QUADICA.zip",
"metadata.pdf":
"https://www.hydroshare.org/resource/26e8238f0be14fa1a49641cd8a455e29/data/contents/Metadata_QUADICA.pdf",
"catchment_attributes.csv":
"https://www.hydroshare.org/resource/88254bd930d1466c85992a7dea6947a4/data/contents/catchment_attributes.csv"
}
def __init__(self, path=None, **kwargs):
super().__init__(path=path, **kwargs)
self.ds_dir = path
self._download()
@property
def features(self)->list:
"""names of water quality parameters available in this dataset"""
return ['Q', 'NO3', 'NO3N', 'NMin', 'TN', 'PO4', 'PO4P', 'TP', 'DOC', 'TOC']
@property
def stattions(self)->list:
"""IDs of stations for which data is available"""
return self.metadata()['OBJECTID'].tolist()
@property
def station_names(self):
"""names of stations"""
return self.metadata()[['OBJECTID', 'Station']]
def wrtds_monthly(
self,
features:Union[str, list] = None,
stations:Union[List[int], int] = None,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None,
)->pd.DataFrame:
"""
Monthly median concentrations, flow-normalized concentrations and mean
fluxes of water chemistry parameters. These are estimated using Weighted
Regressions on Time, Discharge, and Season (WRTDS)
for stations with enough data availability. This data is available for a total of
140 stations. The data from all stations does not start and end at the same time.
Therefore, some stations have more datapoints than others. The maximum number of
datapoints for a station is 576 while the minimum is 244.
Parameters
----------
features : str/list, optional
name/names of water quality parameters to retrieve
stations : int/list, optional (default=None)
name/names of stations whose data is to be retrieved.
st : optional
starting point of data. By default, the data starts from 1992-09
en : optional
end point of data. By default, the data ends at 2013-12
Returns
-------
pd.DataFrame
a dataframe of shape (50186, 47)
Examples
--------
>>> from ai4water.datasets import Quadica
>>> dataset = Quadica()
>>> df = dataset.wrtds_monthly()
"""
fname = os.path.join(self.ds_dir, "quadica", "wrtds_monthly.csv")
wrtds = pd.read_csv(fname)
wrtds.index = pd.to_datetime(wrtds['Year'].astype(str) + ' ' + wrtds['Month'].astype(str))
if features is None:
features = wrtds.columns.tolist()
elif isinstance(features, str):
features = [features]
assert isinstance(features, list)
wrtds = wrtds[features]
return check_st_en(wrtds, st, en)
def wrtds_annual(
self,
features:Union[str, list] = None,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None,
)->pd.DataFrame:
"""Annual median concentrations, flow-normalized concentrations, and mean
fluxes estimated using Weighted Regressions on Time, Discharge, and Season (WRTDS)
for stations with enough data availability.
Parameters
----------
features : optional
name/names of water quality parameters to retrieve
st : optional
starting point of data. By default, the data starts from 1992
en : optional
end point of data. By default, the data ends at 2013
Returns
-------
pd.DataFrame
a dataframe of shape (4213, 46)
Examples
--------
>>> from ai4water.datasets import Quadica
>>> dataset = Quadica()
>>> df = dataset.wrtds_annual()
"""
fname = os.path.join(self.ds_dir, "quadica", "wrtds_annual.csv")
wrtds = pd.read_csv(fname)
wrtds.index = pd.to_datetime(wrtds['Year'].astype(str))
if features is None:
features = wrtds.columns.tolist()
elif isinstance(features, str):
features = [features]
assert isinstance(features, list)
wrtds = wrtds[features]
return check_st_en(wrtds, st, en)
def metadata(self)->pd.DataFrame:
"""
fetches the metadata about the stations as a pandas DataFrame.
Each row represents metadata about one station and each column
represents one feature. The R2 and pbias are regression coefficients
and percent bias of WRTDS models for each parameter.
Returns
-------
pd.DataFrame
a dataframe of shape (1386, 60)
"""
fname = os.path.join(self.ds_dir, "quadica", "metadata.csv")
return pd.read_csv(fname,encoding='cp1252')
def pet(
self,
stations: Union[List[int], int] = None,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None,
)->pd.DataFrame:
"""
average monthly potential evapotranspiration starting from
1950-01 to 2018-09
Examples
--------
>>> from ai4water.datasets import Quadica
>>> dataset = Quadica()
>>> df = dataset.pet() # -> (828, 1386)
"""
fname = os.path.join(self.ds_dir, "quadica", "pet_monthly.csv")
pet = pd.read_csv(fname, parse_dates=[['Year', 'Month']], index_col='Year_Month')
if stations is not None:
stations = [str(stn) for stn in stations]
pet = pet[stations]
return check_st_en(pet, st, en)
def avg_temp(
self,
stations: Union[List[int], int] = None,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None,
)->pd.DataFrame:
"""
monthly medians of average temperature starting from 1950-01 to 2018-09
parameters
-----------
stations :
name of stations for which data is to be retrieved. By default, data
for all stations is retrieved.
st : optional
starting point of data. By default, the data starts from 1950-01
en : optional
end point of data. By default, the data ends at 2018-09
Returns
-------
pd.DataFrame
a pandas dataframe of shape (time_steps, stations). With default input
arguments, the shape is (828, 1386)
Examples
--------
>>> from ai4water.datasets import Quadica
>>> dataset = Quadica()
>>> df = dataset.avg_temp() # -> (828, 1388)
"""
fname = os.path.join(self.ds_dir, "quadica", "tavg_monthly.csv")
temp = pd.read_csv(fname, parse_dates=[['Year', 'Month']], index_col='Year_Month')
if stations is not None:
stations = [str(stn) for stn in stations]
temp = temp[stations]
return check_st_en(temp, st, en)
def precipitation(
self,
stations: Union[List[int], int] = None,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None,
)->pd.DataFrame:
""" sums of precipitation starting from 1950-01 to 2018-09
parameters
-----------
stations :
name of stations for which data is to be retrieved. By default, data
for all stations is retrieved.
st : optional
starting point of data. By default, the data starts from 1950-01
en : optional
end point of data. By default, the data ends at 2018-09
Returns
-------
pd.DataFrame
a dataframe of shape (828, 1388)
Examples
--------
>>> from ai4water.datasets import Quadica
>>> dataset = Quadica()
>>> df = dataset.precipitation() # -> (828, 1388)
"""
fname = os.path.join(self.ds_dir, "quadica", "pre_monthly.csv")
pcp = pd.read_csv(fname, parse_dates=[['Year', 'Month']], index_col='Year_Month')
if stations is not None:
stations = [str(stn) for stn in stations]
pcp = pcp[stations]
return check_st_en(pcp, st, en)
def monthly_medians(
self,
features:Union[List[str], str] = None,
stations: Union[List[int], int] = None,
)->pd.DataFrame:
"""
This function reads the `c_months.csv` file which contains the monthly
medians over the whole time series of water quality variables
and discharge
parameters
----------
features : list/str, optional, (default=None)
name/names of features
stations : list/int, optional (default=None)
stations for which data is to be retrieved
Returns
-------
pd.DataFrame
a dataframe of shape (16629, 18). 15 of the 18 columns represent a
water chemistry parameter. 16629 comes from 1386*12 where 1386 is stations
and 12 is months.
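Examples
--------
>>> from ai4water.datasets import Quadica
>>> dataset = Quadica()
... # fetch monthly medians of all stations and parameters
>>> medians = dataset.monthly_medians()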
"""
fname = os.path.join(self.ds_dir, "quadica", "c_months.csv")
df = pd.read_csv(fname)
if features is not None:
df = df[features]
if stations is not None:
df = df.loc[df['OBJECTID'].isin(stations)]
return df
def annual_medians(
self,
)->pd.DataFrame:
"""Annual medians over the whole time series of water quality variables
and discharge
Returns
-------
pd.DataFrame
a dataframe of shape (24393, 18)
"""
fname = os.path.join(self.ds_dir, "quadica", "c_annual.csv")
return pd.read_csv(fname)
def fetch_annual(self):
raise NotImplementedError
def catchment_attributes(
self,
features:Union[List[str], str] = None,
stations: Union[List[int], int] = None,
)->pd.DataFrame:
"""
Returns static physical catchment attributes in the form of a dataframe.
parameters
----------
features : list/str, optional, (default=None)
name/names of static attributes to fetch
stations : list/int, optional (default=None)
name/names of stations whose static/physical features are to be read
Returns
--------
pd.DataFrame
a pandas dataframe of shape (stations, features). With default input arguments,
shape is (1386, 113)
Examples
---------
>>> from ai4water.datasets import Quadica
>>> dataset = Quadica()
>>> cat_features = dataset.catchment_attributes()
... # get attributes of only selected stations
>>> dataset.catchment_attributes(stations=[1,2,3])
"""
fname = os.path.join(self.ds_dir, "catchment_attributes.csv")
df = pd.read_csv(fname, encoding='unicode_escape')
if features:
assert isinstance(features, list)
df = df[features]
if stations is not None:
assert isinstance(stations, (list, np.ndarray))
df = df.loc[df['OBJECTID'].isin(stations)]
return df
def fetch_monthly(
self,
features:Union[List[str], str] = None,
stations:Union[List[int], int] = None,
median:bool = True,
fnc:bool = True,
fluxes:bool = True,
precipitation:bool = True,
avg_temp:bool = True,
pet:bool = True,
only_continuous:bool = True,
cat_features:bool = True,
max_nan_tol:Union[int, None] = 0,
)->Tuple[pd.DataFrame, pd.DataFrame]:
"""
Fetches monthly concentrations of water quality parameters.
- ``median_Q`` Median discharge
- ``median_COMPOUND`` Median concentration from grab sampling data
- ``median_C`` Median concentration from WRTDS
- ``median_FNC`` Median flow-normalized concentration from WRTDS
- ``mean_Flux`` Mean flux from WRTDS
- ``mean_FNFlux`` Mean flow-normalized flux from WRTDS
parameters
----------
features : str/list, optional (default=None)
name or names of water quality parameters to fetch. By default
following parameters are considered
- ``NO3``
- ``NO3N``
- ``TN``
- ``Nmin``
- ``PO4``
- ``PO4P``
- ``TP``
- ``DOC``
- ``TOC``
stations : int/list, optional (default=None)
name or names of stations whose data is to be fetched
median : bool, optional (default=True)
whether to fetch median concentration values or not
fnc : bool, optional (default=True)
whether to fetch flow normalized concentrations or not
fluxes : bool, optional (default=True)
Setting this to true will add two features i.e. mean_Flux_FEATURE
and mean_FNFlux_FEATURE
precipitation : bool, optional (default=True)
whether to fetch average monthly precipitation or not
avg_temp : bool, optional (default=True)
whether to fetch average monthly temperature or not
pet : bool, optional (default=True)
whether to fetch potential evapotranspiration data or not
only_continuous : bool, optional (default=True)
If True, will return data for only those stations which have continuous
monthly timeseries data from 1993-01-01 to 2013-12-01.
cat_features : bool, optional (default=True)
whether to fetch catchment features or not.
max_nan_tol : int, optional (default=0)
setting this value to 0 will remove any time-series (station) containing
missing values. If None, time-series are not removed on the basis of missing values.
Returns
--------
tuple
two dataframes whose length is same but the columns are different
- a pandas dataframe of timeseries of parameters (stations*timesteps, dynamic_features)
- a pandas dataframe of static features (stations*timesteps, catchment_features)
Examples
--------
>>> from ai4water.datasets import Quadica
>>> dataset = Quadica()
>>> mon_dyn, mon_cat = dataset.fetch_monthly(max_nan_tol=None)
... # However, mon_dyn contains data for all parameters, many of which have
... # large number of nans. If we want to fetch data only related to TN without any
... # missing value, we can do as below
>>> mon_dyn_tn, mon_cat_tn = dataset.fetch_monthly(features="TN", max_nan_tol=0)
... # if we want to find out how many catchments are included in mon_dyn_tn
>>> len(mon_dyn_tn['OBJECTID'].unique())
... # 25
"""
if features is None:
features = self.features
if isinstance(features, str):
features = [features]
assert isinstance(features, list)
_wrtd_features = ['median_Q']
for feat in features:
if fluxes:
_wrtd_features += self._consider_fluxes(feat)
if median:
_wrtd_features += self._consider_median(feat)
if fnc:
_wrtd_features += self._consider_fnc(feat)
_wrtd_features = list(set(_wrtd_features))
_features = _wrtd_features.copy()
df = self.wrtds_monthly(features=_wrtd_features + ['OBJECTID'], stations=stations)
if only_continuous:
groups = []
for idx, grp in df.groupby('OBJECTID'):
# there are 252 months from 1993 to 2013
if len(grp.loc["19930101": "20131201"]) == 252:
groups.append(grp.loc["19930101": "20131201"])
df = pd.concat(groups)
#df[_med_features] = self.monthly_medians(features=_features, stations=stations)
if max_nan_tol is not None:
groups = []
for idx, grp in df.groupby('OBJECTID'):
if grp.isna().sum().sum() <= max_nan_tol:
groups.append(grp)
if len(groups) == 0:
raise ValueError(f"""
No data with nans less or equal to {max_nan_tol} is found.
Please increase the value of "max_nan_tol" or choose a different parameter.
""")
df = pd.concat(groups)
if avg_temp:
temp = self.avg_temp(df['OBJECTID'].unique(), "19930101", "20131201")
stns = np.array([np.repeat(int(val), len(temp)) for val in temp.columns]).reshape(-1, )
temp = np.concatenate([temp[col] for col in temp.columns])
assert np.allclose(stns, df['OBJECTID'].values)
df['avg_temp'] = temp
if precipitation:
pcp = self.precipitation(df['OBJECTID'].unique(), "19930101", "20131201")
stns = np.array([np.repeat(int(val), len(pcp)) for val in pcp.columns]).reshape(-1, )
pcp = np.concatenate([pcp[col] for col in pcp.columns])
assert np.allclose(stns, df['OBJECTID'].values)
df['precip'] = pcp
if pet:
pet = self.pet(df['OBJECTID'].unique(), "19930101", "20131201")
stns = np.array([np.repeat(int(val), len(pet)) for val in pet.columns]).reshape(-1, )
pet = np.concatenate([pet[col] for col in pet.columns])
assert np.allclose(stns, df['OBJECTID'].values)
df['pet'] = pet
if cat_features:
cat_features = self.catchment_attributes(stations=df['OBJECTID'].unique())
n = len(df) / len(df['OBJECTID'].unique())
# repeat each row of cat_features n times
cat_features = cat_features.loc[cat_features.index.repeat(n)]
assert np.allclose(cat_features['OBJECTID'].values, df['OBJECTID'].values)
return df, cat_features
def _consider_median(self, feature):
d = {
'Q': ['median_Q'],
'DOC': ['median_C_DOC'],
'TOC': ['median_C_TOC'],
'TN': ['median_C_TN'],
'TP': ['median_C_TP'],
'PO4': ['median_C_PO4'],
'PO4P': [],
'NMin': ['median_C_NMin'],
'NO3': ['median_C_NO3'],
'NO3N': [],
}
return d[feature]
def _consider_fnc(self, feature):
d = {
'Q': ['median_Q'],
'DOC': ['median_FNC_DOC'],
'TOC': ['median_FNC_TOC'],
'TN': ['median_FNC_TN'],
'TP': ['median_FNC_TP'],
'PO4': ['median_FNC_PO4'],
'PO4P': [],
'NMin': ['median_FNC_NMin'],
'NO3': ['median_FNC_NO3'],
'NO3N': [],
}
return d[feature]
def _consider_fluxes(self, feature):
d = {
'Q': ['median_Q'],
'DOC': ['mean_Flux_DOC', 'mean_FNFlux_DOC'],
'TOC': ['mean_Flux_TOC', 'mean_FNFlux_TOC'],
'TN': ['mean_Flux_TN', 'mean_FNFlux_TN'],
'TP': ['mean_Flux_TP', 'mean_FNFlux_TP'],
'PO4': ['mean_Flux_PO4', 'mean_FNFlux_PO4'],
'PO4P': [],
'NMin': ['mean_Flux_NMin', 'mean_FNFlux_NMin'],
'NO3': ['mean_Flux_NO3', 'mean_FNFlux_NO3'],
'NO3N': [],
}
return d[feature]
def to_DataSet(
self,
target:str = "TP",
input_features:list = None,
split:str = "temporal",
lookback:int = 24,
**ds_args
):
"""
This function prepares data for machine learning prediction problem. It
returns an instance of ai4water.preprocessing.DataSetPipeline which can be
given to model.fit or model.predict
parameters
----------
target : str, optional (default="TP")
parameter to consider as target
input_features : list, optional
names of input features
split : str, optional (default="temporal")
if ``temporal``, validation and test sets are taken from the data of
each station and then concatenated. If ``spatial``, training
validation and test is decided based upon stations.
lookback : int
number of past time steps to consider (passed to ``ts_args`` of DataSet)
**ds_args :
key word arguments
Returns
-------
ai4water.preprocessing.DataSetPipeline
an instance of DataSetPipeline
Example
--------
>>> from ai4water.datasets import Quadica
... # initialize the Quadica class
>>> dataset = Quadica()
... # define the input features
>>> inputs = ['median_Q', 'OBJECTID', 'avg_temp', 'precip', 'pet']
... # prepare data for TN as target
>>> dsp = dataset.to_DataSet("TN", inputs, lookback=24)
"""
assert split in ("temporal", "spatial")
from ai4water.preprocessing import DataSet, DataSetPipeline
dyn, cat = self.fetch_monthly(features=target, max_nan_tol=0)
if input_features is None:
input_features = ['median_Q', 'OBJECTID', 'avg_temp', 'precip', 'pet']
output_features = [f'median_C_{target}']
_ds_args = {
'val_fraction': 0.2,
'train_fraction': 0.7
}
if ds_args is None:
ds_args = dict()
_ds_args.update(ds_args)
dsets = []
for idx, grp in dyn.groupby("OBJECTID"):
ds = DataSet(data=grp,
ts_args={'lookback': lookback},
input_features=input_features,
output_features=output_features,
**_ds_args)
dsets.append(ds)
return DataSetPipeline(*dsets) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/_quadica.py | _quadica.py |
import sys
import json
import signal
import time
import hashlib
from contextlib import contextmanager
from .utils import download
from ai4water.backend import requests, os
abort_signal = False
abort_counter = 0
exceptions = False
def ctrl_c(func):
signal.signal(signal.SIGINT, func)
return func
@ctrl_c
def handle_ctrl_c(*args, **kwargs):
global abort_signal
global abort_counter
global exceptions
abort_signal = True
abort_counter += 1
if abort_counter >= 2:
if exceptions:
raise Exception('\n Immediate abort. There might be unfinished files.')
else:
sys.exit(1)
#see https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python/24176022#24176022
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def check_hash(filename, checksum):
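"""computes the hash of ``filename`` using the algorithm given in ``checksum``
(formatted as 'algorithm:value') and returns the expected and the computed
digest. If the file does not exist, 'invalid' is returned as the computed digest."""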
algorithm, value = checksum.split(':')
if not os.path.exists(filename):
return value, 'invalid'
h = hashlib.new(algorithm)
with open(filename, 'rb') as f:
while True:
data = f.read(4096)
if not data:
break
h.update(data)
digest = h.hexdigest()
return value, digest
def download_from_zenodo(
outdir,
doi,
cont=False,
tolerate_error=False,
include:list = None,
**kwargs
):
"""
Downloads the files of a Zenodo record; adapted to suit the requirements of this package.
:param outdir: Output directory, created if necessary. Default: current directory.
:param doi: str, Zenodo DOI
:param cont: if True, files which were already downloaded correctly (matching checksum) are skipped. (Default: False.)
:param tolerate_error: False, Continue with next file if error happens.
:param include : files to download. Files which are not in include will not be
downloaded.
:param kwargs:
sandbox: bool, Use Zenodo Sandbox URL.
timeout: int, Connection time-out. Default: 15 [sec].
pause: float, Seconds to wait before retry attempt, e.g. 0.5
retry: int, Number of times to Retry on error.
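Example (an illustrative sketch; the DOI below is only a placeholder, the call
downloads all files of the corresponding Zenodo record into the folder 'data'):
>>> download_from_zenodo('data', doi='10.5281/zenodo.1234567')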
"""
_wget = kwargs.get('wget', None)
md5 = kwargs.get('md5', False)
keep = kwargs.get('keep', False)
timeout = kwargs.get('timeout', 15)
sandbox = kwargs.get('sandbox', False)
pause = kwargs.get('pause', 0.5)
retry = kwargs.get('retry', 0)
with cd(outdir):
url = doi
if not url.startswith('http'):
url = 'https://doi.org/' + url
try:
r = requests.get(url, timeout=timeout)
except requests.exceptions.ConnectTimeout:
raise TimeoutError("Connection timeout.")
except Exception:
raise ConnectionError
if not r.ok:
raise ValueError(f'DOI {doi} could not be resolved. Try again, or use record ID.')
recordID = r.url.split('/')[-1]
if not sandbox:
url = 'https://zenodo.org/api/records/'
else:
url = 'https://sandbox.zenodo.org/api/records/'
try:
r = requests.get(url + recordID, timeout=timeout)
except requests.exceptions.ConnectTimeout:
raise TimeoutError('Connection timeout during metadata reading.')
except Exception:
raise ConnectionError('Connection error during metadata reading.')
if r.ok:
js = json.loads(r.text)
files = js['files']
if include:
assert isinstance(include, list)
filenames = [f['key'] for f in files]
assert all([file in filenames for file in include]), f"invalid include: {include}"
# only consider those files which are in include
files = [file for file in files if file['key'] in include]
total_size = sum(f['size'] for f in files)
if md5 is not None:
with open('md5sums.txt', 'wt') as md5file:
for f in files:
fname = f['key']
checksum = f['checksum'].split(':')[-1]
md5file.write(f'{checksum} {fname}\n')
if _wget is not None:
if _wget == '-':
for f in files:
link = f['links']['self']
print(link)
else:
with open(_wget, 'wt') as wgetfile:
for f in files:
fname = f['key']
link = 'https://zenodo.org/record/{}/files/{}'.format(
recordID, fname
)
wgetfile.write(link + '\n')
else:
print('Title: {}'.format(js['metadata']['title']))
print('Keywords: ' +
(', '.join(js['metadata'].get('keywords', []))))
print('Publication date: ' + js['metadata']['publication_date'])
print('DOI: ' + js['metadata']['doi'])
print('Total size: {:.1f} MB'.format(total_size / 2 ** 20))
for f in files:
if abort_signal:
print('Download aborted with CTRL+C.')
print('Already successfully downloaded files are kept.')
break
link = f['links']['self']
size = f['size'] / 2 ** 20
print()
print(f'Link: {link} size: {size:.1f} MB')
fname = f['key']
checksum = f['checksum']
remote_hash, local_hash = check_hash(fname, checksum)
if remote_hash == local_hash and cont:
print(f'{fname} is already downloaded correctly.')
continue
for _ in range(retry + 1):
try:
filename = download(link)
except Exception as e:
print(' Download error.')
time.sleep(pause)
else:
break
else:
print(' Too many errors.')
if not tolerate_error:
raise Exception('Download is aborted. Too many errors')
print(f' Ignoring {fname} and downloading the next file.')
continue
h1, h2 = check_hash(filename, checksum)
if h1 == h2:
print(f'Checksum is correct. ({h1})')
else:
print(f'Checksum is INCORRECT!({h1} got:{h2})')
if not keep:
print(' File is deleted.')
os.remove(filename)
else:
print(' File is NOT deleted!')
if not tolerate_error:
sys.exit(1)
else:
print('All files have been downloaded.')
else:
raise Exception('Record could not get accessed.') | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/download_zenodo.py | download_zenodo.py |
# https://zenodo.org/record/3712407#.YExYDtyRWUk
# https://zenodo.org/record/3844201#.YExYi9yRWUk
# https://zenodo.org/record/1471322#.YExYftyRWUk
# https://zenodo.org/record/3961605#.YExYcdyRWUk
# https://zenodo.org/record/1452383#.YExZRdyRWUk
# https://zenodo.org/record/4428151#.YExZPdyRWUk
# https://zenodo.org/record/3903238#.YExZItyRWUk
# https://zenodo.org/record/3670864#.YExZFdyRWUk
# https://zenodo.org/record/3834623#.YExY7tyRWUk
# https://zenodo.org/record/4029572#.YExY5NyRWUk
# https://zenodo.org/record/4552842#.YExaR9yRWUk
# https://zenodo.org/record/3466097#.YExaQ9yRWUk
# https://zenodo.org/record/4327078#.YExaCdyRWUk
# https://zenodo.org/record/3712397#.YExbsdyRWUk
# https://zenodo.org/record/3560706#.YExeztyRWUk
# https://zenodo.org/record/3698998#.YExekdyRWUk
# https://zenodo.org/record/3564237#.YExlh9yRWUl
# https://zenodo.org/record/581145#.YExeV9yRWUk
# https://zenodo.org/record/3978225#.YExeEtyRWUk
# https://zenodo.org/record/3763766#.YExdntyRWUk
# https://zenodo.org/record/3744217#.YExdi9yRWUk
# https://zenodo.org/record/3948568#.YExdeNyRWUk
# https://zenodo.org/record/3538207#.YExdbtyRWUk
# https://zenodo.org/record/1486058#.YExc-dyRWUk
# https://zenodo.org/record/3561032#.YExc7tyRWUk
# https://zenodo.org/record/1466038#.YExc3dyRWUk
# https://zenodo.org/record/581186#.YExcz9yRWUk
# https://zenodo.org/record/4572636#.YExcwNyRWUk
# https://zenodo.org/record/1267837#.YExcZNyRWUk
# https://zenodo.org/record/3808223#.YExcX9yRWUk
# https://zenodo.org/record/4447435#.YExcWtyRWUk
# https://zenodo.org/record/1300354#.YExcVdyRWUk
# https://zenodo.org/record/4308036#.YExcJdyRWUk
# https://zenodo.org/record/3459610#.YExhNNyRWUk
# https://zenodo.org/record/3763342#.YExhCdyRWUk
# https://zenodo.org/record/4559571#.YExhBNyRWUk
# https://zenodo.org/record/3663630#.YExg89yRWUk
# https://zenodo.org/record/4382937#.YExg7dyRWUk
# https://zenodo.org/record/3876148#.YExgUdyRWUk
# https://zenodo.org/record/3982962#.YExgTdyRWUk
# https://zenodo.org/record/2559480#.YExvWtyRWUk
# https://zenodo.org/record/4094684#.YExvS9yRWUk
# https://zenodo.org/record/2596929#.YExvP9yRWUk
# https://zenodo.org/record/977773#.YExvEtyRWUk
# https://zenodo.org/record/3520146#.YExu_tyRWUk
# https://zenodo.org/record/3836648#.YExu09yRWUk
# https://zenodo.org/record/4290294#.YExo5tyRWUk
# https://zenodo.org/record/2728636#.YEx4EdyRWUk
# https://zenodo.org/record/3581187#.YEx5CNyRWUk
# https://zenodo.org/record/3946242#.YEx5FtyRWUk
# https://zenodo.org/record/883100#.YEx5L9yRWUk
# https://zenodo.org/record/3239401#.YEx5gtyRWUk
# https://zenodo.org/record/4183611#.YEx5vNyRWUk
# https://zenodo.org/record/4559696#.YEx5xdyRWUk
# https://zenodo.org/record/3776011#.YEx6YdyRWUk
# https://zenodo.org/record/4315647#.YEx6v9yRWUk
# https://zenodo.org/record/1185084#.YEx77NyRWUk
# https://zenodo.org/record/4271209#.YEx7z9yRWUk
# https://zenodo.org/record/4570780#.YEx7y9yRWUk
# https://zenodo.org/record/3593395#.YEx7x9yRWUk
# https://zenodo.org/record/3632501#.YEx7qtyRWUk
# https://zenodo.org/record/1122635#.YEx7ndyRWUk
# https://zenodo.org/record/3893897#.YEx7gNyRWUk
# https://zenodo.org/record/4395737#.YEx7a9yRWUk
# https://zenodo.org/record/3779473#.YEx7aNyRWUk
# https://zenodo.org/record/1226394#.YEx7O9yRWUk
# https://zenodo.org/record/4391461#.YEx7MtyRWUk
# https://zenodo.org/record/4247833#.YEx7HtyRWUk
# https://zenodo.org/record/1486058#.YEx7G9yRWUk
# https://zenodo.org/record/3928587#.YEx7E9yRWUk
# https://zenodo.org/record/4341521#.YEx7DdyRWUk
# https://zenodo.org/record/3974871#.YEx7CdyRWUk
# https://zenodo.org/record/1298526#.YEx7B9yRWUk
# https://zenodo.org/record/57293#.YEx6_dyRWUk
# https://zenodo.org/record/4268711#.YEx6-9yRWUk
# https://zenodo.org/record/322827#.YEx69tyRWUk
# https://zenodo.org/record/1050301#.YEx6y9yRWUk
# https://zenodo.org/record/4734372#.YKc9QKGRWUk
# https://www.nature.com/articles/s41597-019-0288-y#Abs1
# https://catalogue-imos.aodn.org.au/geonetwork/srv/api/records/9e2ba32a-5da3-4ea5-b750-e6279680dd71
#https://daac.ornl.gov/cgi-bin/dsviewer.pl?ds_id=1566
# https://doi.pangaea.de/10.1594/PANGAEA.885860
# https://doi.pangaea.de/10.1594/PANGAEA.913939
# https://www.nature.com/articles/s41597-019-0346-5#Sec8
# ETP
# https://zenodo.org/record/4038399#.YEx6INyRWUk
# https://zenodo.org/record/4601596#.YEx6M9yRWUk
# https://zenodo.org/record/3981919#.YEx6ONyRWUk
# https://zenodo.org/record/4271331#.YEx6PdyRWUk
# https://zenodo.org/record/3726856#.YEx6RdyRWUk
# https://zenodo.org/record/4580292#.YEx6TtyRWUk
# https://zenodo.org/record/1044306#.YEx6UNyRWUk
# https://zenodo.org/record/3891936#.YEx7S9yRWUk
# https://zenodo.org/record/4060319#.YEx7QtyRWUk
# rr
# https://zenodo.org/record/3341592#.YEx5RtyRWUk
# https://zenodo.org/record/3931582#.YEx5W9yRWUk
# https://zenodo.org/record/3528098#.YEx64NyRWUk
# https://hess.copernicus.org/articles/25/3105/2021/
# https://www.nature.com/articles/s41597-019-0282-4#Sec12
# https://www.nature.com/articles/sdata201880#Tab3
# https://edg.epa.gov/metadata/catalog/search/resource/details.page?uuid=https://doi.org/10.23719/1378947
# air
# https://zenodo.org/record/4311854#.YExpwNyRWUk
# https://zenodo.org/record/4281271#.YExpYNyRWUk
# ocean
# https://zenodo.org/record/4600696#.YExpSdyRWUk
# Water Quality
# https://zenodo.org/record/1495558#.YExqFtyRWUk
#https://www.nature.com/articles/sdata201798#Sec18
# https://www.nature.com/articles/s41597-020-0455-1#Sec11
# Flow
# https://zenodo.org/record/3941890#.YExp5NyRWUk
# https://zenodo.org/record/1206188#.YExn-dyRWUk
# https://zenodo.org/record/4394503#.YEx6ndyRWUk
# https://zenodo.org/record/3240954#.YEx6s9yRWUk
# Groundwater
# https://zenodo.org/record/3887120#.YExq1tyRWUk
# https://zenodo.org/record/3928587#.YExnztyRWUk
# https://zenodo.org/record/1158631#.YEx7ZdyRWUk
# https://zenodo.org/record/4139912#.YEx7XdyRWUk
# Weather
# https://zenodo.org/record/3678799#.YExsP9yRWUk
# https://zenodo.org/record/3679247#.YExsOdyRWUk
# https://zenodo.org/record/3678789#.YExsN9yRWUk
# https://zenodo.org/record/4567325#.YExqjtyRWUk
# https://zenodo.org/record/3549899#.YExqNdyRWUk
# https://zenodo.org/record/4319773#.YExoq9yRWUk
# https://zenodo.org/record/4319770#.YExooNyRWUk
# https://zenodo.org/record/4319756#.YExnl9yRWUk
# https://zenodo.org/record/854619#.YExnityRWUk
# https://essd.copernicus.org/articles/13/1289/2021/
# https://essd.copernicus.org/articles/13/1307/2021/
# https://www.tr32db.uni-koeln.de/search/view.php?dataID=1786
# https://doi.org/10.3334/ORNLDAAC/1840
#DWD
# https://opendata.dwd.de/climate_environment/CDC/observations_germany/
# geologic
# https://zenodo.org/record/4536561#.YExpQNyRWUk
# https://zenodo.org/record/2549499#.YExo09yRWUk
# 2D time series datasets
# https://zenodo.org/record/1135230#.YExYotyRWUk
# https://zenodo.org/record/2630456#.YExb4tyRWUk
# https://zenodo.org/record/4559368#.YExd1NyRWUk
# https://zenodo.org/record/4542076#.YExuxtyRWUk
# https://zenodo.org/record/4489056#.YExoBtyRWUk
# https://zenodo.org/record/1157344#.YExnqNyRWUk
# https://www.nature.com/articles/s41597-020-0450-6
# https://www.nature.com/articles/sdata201542#Abs1
# https://www.nature.com/articles/s41597-019-0228-x
# https://zenodo.org/record/4058167
# https://www.nature.com/articles/sdata2018224#Sec10
# soil
# https://www.tr32db.uni-koeln.de/search/view.php?dataID=1839
# https://www.tr32db.uni-koeln.de/search/view.php?dataID=1838
# https://www.tr32db.uni-koeln.de/search/view.php?dataID=1837
# https://www.tr32db.uni-koeln.de/search/view.php?dataID=1760
# https://www.tr32db.uni-koeln.de/search/view.php?dataID=1761
import glob
import warnings
from typing import Union, Tuple, Any, Optional, List
try:
from shapely.geometry import shape, mapping
from shapely.ops import unary_union
except (ModuleNotFoundError, OSError):
shape, mapping, unary_union = None, None, None
from ai4water.backend import os, random, np, pd
from ai4water.backend import netCDF4
from ai4water.backend import xr
from .download_pangaea import PanDataSet
from .utils import download_all_http_directory
from .utils import maybe_download, download_and_unzip, unzip_all_in_dir, download
from .utils import check_attributes, check_st_en
from .utils import encode_column, LabelEncoder, OneHotEncoder
SEP = os.sep
# TODO, add visualization
# TODO all available datasets should be available using a single interface instead of importing each separately
DATASETS = [
'ISWDC',
'SEBAL_ET_CHINA',
'GeoChemMatane',
'PrecipBerlin',
'HydroChemJava',
'WaterChemVictoriaLakes',
'WaterChemEcuador',
'HydrocarbonsGabes',
'SedimentAmersee',
'FlowTetRiver',
'HoloceneTemp',
'RiverTempEroo',
'StreamTempSpain',
'FlowSedDenmark',
'FlowSamoylov',
'EtpPcpSamoylov',
'RiverIsotope',
'WQCantareira',
'RiverTempSpain',
'HydrometricParana',
'FlowBenin',
'YamaguchiClimateJp',
'WQJordan2',
'WQJordan',
'Weisssee'
]
class Datasets(object):
"""
This is the base class for datasets
Note:
We don't host datasets. Each dataset is downloaded from the target remote
server and saved to the local disk.
"""
def __init__(self,
name=None,
units=None,
path:str = None
):
"""
Arguments:
name : str (default=None)
name of dataset
units : str, (default=None)
the unit system being used
path : str (default=None)
path where the data is available (manually downloaded).
If None, it will be downloaded
"""
if name is None:
name = self.__class__.__name__
if units is not None:
assert units in ['si', 'imperial', 'metric']
self.units = units
self.name = name
@property
def url(self):
raise NotImplementedError("url must be defined.")
@property
def base_ds_dir(self):
"""Base datasets directory"""
return os.path.join(os.path.dirname(__file__), 'data')
@property
def ds_dir(self):
return self._ds_dir
@ds_dir.setter
def ds_dir(self, path=None):
if path is None:
_dir = os.path.join(self.base_ds_dir, self.__class__.__name__)
else:
_dir = path
if not os.path.exists(_dir):
os.makedirs(_dir)
self._ds_dir = _dir
return
def _download(self, overwrite=False, **kwargs):
"""Downloads the dataset. If already downloaded, then
Parameters
-----------
overwrite : bool
**kwargs :
any keyword arguments for maybe_download function
"""
maybe_download(self.ds_dir, overwrite=overwrite,
url=self.url, name=self.name, **kwargs)
return
def _download_and_unzip(self):
download_and_unzip(self.ds_dir, self.url)
return
def download_from_pangaea(self, overwrite=False):
if os.path.exists(self.ds_dir):
if overwrite:
print("removing previously downloaded data and downloading again")
self._download_from_pangaea()
else:
print(f"The path {self.ds_dir} already exists.")
self.data_files = [f for f in os.listdir(self.ds_dir) if f.endswith('.txt')]
self.metadata_files = [f for f in os.listdir(self.ds_dir) if f.endswith('.json')]
if len(self.data_files) == 0:
print(f"The path {self.ds_dir} is empty so downloading the files again")
self._download_from_pangaea()
else:
self._download_from_pangaea()
return
def _download_from_pangaea(self):
self.data_files = []
self.metadata_files = []
ds = PanDataSet(self.url)
kids = ds.children()
if len(kids) > 1:
for kid in kids:
kid_ds = PanDataSet(kid)
fname = kid_ds.download(self.ds_dir)
self.metadata_files.append(fname + '._metadata.json')
self.data_files.append(fname + '.txt')
else:
fname = ds.download(self.ds_dir)
self.metadata_files.append(fname + '._metadata.json')
self.data_files.append(fname + '.txt')
return
class Weisssee(Datasets):
dynamic_attributes = ['Precipitation_measurements',
'long_wave_upward_radiation',
'snow_density_at_30cm',
'long_wave_downward_radiation'
]
url = '10.1594/PANGAEA.898217'
def __init__(self, path=None, overwrite=False, **kwargs):
super(Weisssee, self).__init__(path=path, **kwargs)
self.ds_dir = path
self.download_from_pangaea(overwrite=overwrite)
def fetch(self, **kwargs):
"""
Examples
--------
>>> from ai4water.datasets import Weisssee
>>> dataset = Weisssee()
>>> data = dataset.fetch()
"""
data = {}
for f in self.data_files:
fpath = os.path.join(self.ds_dir, f)
df = pd.read_csv(fpath, **kwargs)
if 'index_col' in kwargs:
df.index = pd.to_datetime(df.index)
data[f.split('.txt')[0]] = df
return data
class ETP_CHN_SEBAL(Datasets):
url = "https://zenodo.org/record/4218413#.YBNhThZS-Ul"
class ISWDC(Datasets):
url = "https://zenodo.org/record/2616035#.YBNl5hZS-Uk"
class WQJordan(Weisssee):
"""Jordan River water quality data of 9 variables for two variables."""
url = 'https://doi.pangaea.de/10.1594/PANGAEA.919103'
class WQJordan2(Weisssee):
"""Stage and Turbidity data of Jordan River"""
url = '10.1594/PANGAEA.919104'
class YamaguchiClimateJp(Weisssee):
"""Daily climate and flow data of Japan from 2006 2018"""
url = "https://doi.pangaea.de/10.1594/PANGAEA.909880"
class FlowBenin(Weisssee):
"""Flow data"""
url = "10.1594/PANGAEA.831196"
class HydrometricParana(Weisssee):
"""Daily and monthly water level and flow data of Parana river Argentina
from 1875 to 2017."""
url = "https://doi.pangaea.de/10.1594/PANGAEA.882613"
class RiverTempSpain(Weisssee):
"""Daily mean stream temperatures in Central Spain for different periods."""
url = "https://doi.pangaea.de/10.1594/PANGAEA.879494"
class WQCantareira(Weisssee):
"""Water quality and quantity primary data from field campaigns in the Cantareira Water Supply System,
period Oct. 2013 - May 2014"""
url = "https://doi.pangaea.de/10.1594/PANGAEA.892384"
class RiverIsotope(Weisssee):
"""399 δ18O and δD values in river surface waters of Indian River"""
url = "https://doi.pangaea.de/10.1594/PANGAEA.912582"
class EtpPcpSamoylov(Weisssee):
"""Evpotranspiration and Precipitation at station TOWER on Samoylov Island Russia
from 20110524 to 20110819 with 30 minute frequency"""
url = "10.1594/PANGAEA.811076"
class FlowSamoylov(Weisssee):
"""Net lateral flow at station INT2 on Samoylov Island Russia
from 20110612 to 20110819 with 30 minute frequency"""
url = "10.1594/PANGAEA.811072"
class FlowSedDenmark(Weisssee):
"""Flow and suspended sediment concentration fields over tidal bedforms, ADCP profile"""
url = "10.1594/PANGAEA.841977"
class StreamTempSpain(Weisssee):
"""Daily Mean Stream Temperature at station Tormes3, Central Spain from 199711 to 199906."""
url = "https://doi.pangaea.de/10.1594/PANGAEA.879507"
class RiverTempEroo(Weisssee):
"""Water temperature records in the Eroo River and some tributaries (Selenga River basin, Mongolia, 2011-2012)"""
url = "10.1594/PANGAEA.890070"
class HoloceneTemp(Weisssee):
"""Holocene temperature reconstructions for northeastern North America and the northwestern Atlantic,
core Big_Round_Lake."""
url = "10.1594/PANGAEA.905446"
class FlowTetRiver(Weisssee):
"""Daily mean river discharge at meteorological station Perpignan upstream, Têt basin France from 1980
to 2000."""
url = "10.1594/PANGAEA.226925"
class SedimentAmersee(Weisssee):
"""Occurence of flood laminae in sediments of Ammersee"""
url = "10.1594/PANGAEA.746240"
class HydrocarbonsGabes(Weisssee):
"""Concentration and geological parameters of n-alkanes and n-alkenes in surface sediments from the Gulf of Gabes,
Tunisia"""
url = "10.1594/PANGAEA.774595"
class WaterChemEcuador(Weisssee):
"""weekly and biweekly Water chemistry of cloud forest streams at baseflow conditions,
Rio San Francisco, Ecuador"""
url = "10.1594/PANGAEA.778629"
class WaterChemVictoriaLakes(Weisssee):
"""Surface water chemistry of northern Victoria Land lakes"""
url = "10.1594/PANGAEA.807883"
class HydroChemJava(Weisssee):
"""Hydrochemical data from subsurface rivers, coastal and submarine springsin a karstic region
in southern Java."""
url = "10.1594/PANGAEA.882178"
class PrecipBerlin(Weisssee):
"""Sub-hourly Berlin Dahlem precipitation time-series 2001-2013"""
url = "10.1594/PANGAEA.883587"
class GeoChemMatane(Weisssee):
"""Geochemical data collected in shallow groundwater and river water in a subpolar environment
(Matane river, QC, Canada)."""
url = "10.1594/PANGAEA.908290"
class HydroMeteorAndes(Datasets):
"""Hydrometeriological dataset of tropical Andes region"""
url = ["https://springernature.figshare.com/ndownloader/files/10514506",
"https://springernature.figshare.com/ndownloader/files/10514509"]
class WeatherJena(Datasets):
"""
10 minute weather dataset of Jena, Germany hosted at https://www.bgc-jena.mpg.de/wetter/index.html
from 2002 onwards.
Examples
--------
>>> from ai4water.datasets import WeatherJena
>>> dataset = WeatherJena()
>>> data = dataset.fetch()
>>> data.sum()
"""
url = "https://www.bgc-jena.mpg.de/wetter/weather_data.html"
def __init__(self,
path=None,
obs_loc='roof'):
"""
The data is collected at three different locations, i.e. roof, soil and saale (hall).
Parameters
----------
obs_loc : str, optional (default=roof)
location of observation. It can be one of following
- roof
- soil
- saale
"""
if obs_loc not in ['roof', 'soil', 'saale']:
raise ValueError
self.obs_loc = obs_loc
super().__init__(path=path)
self.ds_dir = path
sub_dir = os.path.join(self.ds_dir, self.obs_loc)
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
if xr is None:
warnings.warn("""
loading data from csv files is slow.
Try installing xarray and netcdf for faster loading
""")
download_all_http_directory(self.url, sub_dir, match_name=self.obs_loc)
unzip_all_in_dir(sub_dir, 'zip')
else:
nc_path = os.path.join(sub_dir, "data.nc")
if not os.path.exists(nc_path):
download_all_http_directory(self.url, sub_dir, match_name=self.obs_loc)
unzip_all_in_dir(sub_dir, 'zip')
print("converting data to netcdf file. This will happen only once.")
df = self._read_as_df()
ndf = pd.DataFrame()
for _col in df.columns:
col = _col.replace("/", "_")
ndf[col] = df[_col].copy()
ndf = ndf.reset_index()
ndf.to_xarray().to_netcdf(nc_path)
@property
def dynamic_features(self)->list:
"""returns names of features available"""
return self.fetch().columns.tolist()
def fetch(
self,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None
) -> pd.DataFrame:
"""
Fetches the time series data between given period as pandas dataframe.
Parameters
----------
st : Optional
start of data to be fetched. If None, the data will be returned from the
start (2003-01-01).
en : Optional
end of data to be fetched. If None, the data will be returned till the
end (2021-12-31).
Returns
-------
pd.DataFrame
a pandas dataframe of shape (972111, 21)
Examples
--------
>>> from ai4water.datasets import WeatherJena
>>> dataset = WeatherJena()
>>> data = dataset.fetch()
>>> data.shape
(972111, 21)
... # get data between specific period
>>> data = dataset.fetch("20110101", "20201231")
>>> data.shape
(525622, 21)
"""
sub_dir = os.path.join(self.ds_dir, self.obs_loc)
if xr is None:
df = self._read_as_df()
else:
nc_path = os.path.join(sub_dir, "data.nc")
df = xr.load_dataset(nc_path).to_dataframe()
if 'Date Time' in df:
df.index = pd.to_datetime(df.pop('Date Time'))
if isinstance(st, int):
if en is None:
en = len(df)
assert isinstance(en, int)
return df.iloc[st:en]
elif st is not None:
return df.loc[st:en]
return df
def _read_as_df(self)->pd.DataFrame:
sub_dir = os.path.join(self.ds_dir, self.obs_loc)
all_files = glob.glob(f"{sub_dir}/*.csv")
df = pd.DataFrame()
for fpath in all_files:
f_df = pd.read_csv(fpath, index_col='Date Time',
encoding='unicode_escape', na_values=-9999)
f_df.index = pd.DatetimeIndex(f_df.index)
df = pd.concat([df, f_df]) # todo, such concatenation is slow.
return df.sort_index()
class SWECanada(Datasets):
"""
Daily Canadian historical Snow Water Equivalent dataset from 1928 to 2020
from Brown_ et al., 2019 .
Examples
--------
>>> from ai4water.datasets import SWECanada
>>> swe = SWECanada()
... # get names of all available stations
>>> stns = swe.stations()
>>> len(stns)
2607
... # get data of one station
>>> df1 = swe.fetch('SCD-NS010')
>>> df1['SCD-NS010'].shape
(33816, 3)
... # get data of 5 stations
>>> df5 = swe.fetch(5, st='20110101')
>>> df5.keys()
['YT-10AA-SC01', 'ALE-05CA805', 'SCD-NF078', 'SCD-NF086', 'INA-07RA01B']
>>> [v.shape for v in df5.values()]
[(3500, 3), (3500, 3), (3500, 3), (3500, 3), (3500, 3)]
... # get data of 0.1% of stations
>>> df2 = swe.fetch(0.001, st='20110101')
... # get data of one stations starting from 2011
>>> df3 = swe.fetch('ALE-05AE810', st='20110101')
>>> df3.keys()
>>> ['ALE-05AE810']
>>> df4 = swe.fetch(stns[0:10], st='20110101')
.. _Brown:
https://doi.org/10.1080/07055900.2019.1598843
"""
url = "https://doi.org/10.5194/essd-2021-160"
features = ['snw', 'snd', 'den']
q_flags = ['data_flag_snw', 'data_flag_snd', 'qc_flag_snw', 'qc_flag_snd']
def __init__(self, path=None, **kwargs):
super().__init__(path=path, **kwargs)
self.ds_dir = path
self._download()
def stations(self) -> list:
nc = netCDF4.Dataset(os.path.join(self.ds_dir, 'CanSWE-CanEEN_1928-2020_v1.nc'))
s = nc['station_id'][:]
return s.tolist()
@property
def start(self):
return '19280101'
@property
def end(self):
return '20200731'
def fetch(
self,
station_id: Union[None, str, float, int, list] = None,
features: Union[None, str, list] = None,
q_flags: Union[None, str, list] = None,
st=None,
en=None
) -> dict:
"""
Fetches time series data from selected stations.
Parameters
----------
station_id :
station/stations to be retrieved. In None, then data
from all stations will be returned.
features :
Names of features to be retrieved. Following features
are allowed:
- ``snw`` snow water equivalent kg/m3
- ``snd`` snow depth m
- ``den`` snowpack bulk density kg/m3
If None, then all three features will be retrieved.
q_flags :
If None, then no qflags will be returned. Following q_flag
values are available.
- ``data_flag_snw``
- ``data_flag_snd``
- ``qc_flag_snw``
- ``qc_flag_snd``
st :
start of data to be retrieved
en :
end of data to be retrieved.
Returns
-------
dict
a dictionary of dataframes of shape (st:en, features + q_flags) whose
length is equal to length of stations being considered.
"""
# todo, q_flags not working
if station_id is None:
station_id = self.stations()
elif isinstance(station_id, str):
station_id = [station_id]
elif isinstance(station_id, list):
pass
elif isinstance(station_id, int):
station_id = random.sample(self.stations(), station_id)
elif isinstance(station_id, float):
num_stations = int(len(self.stations()) * station_id)
station_id = random.sample(self.stations(), num_stations)
stns = self.stations()
stn_id_dict = {k: v for k, v in zip(stns, np.arange(len(stns)))}
stn_id_dict_inv = {v: k for k, v in stn_id_dict.items()}
stn_ids = [stn_id_dict[i] for i in station_id]
features = check_attributes(features, self.features)
qflags = []
if q_flags is not None:
qflags = check_attributes(q_flags, self.q_flags)
features_to_fetch = features + qflags
all_stn_data = {}
for stn in stn_ids:
stn_df = self.fetch_station_attributes(stn, features_to_fetch, st=st, en=en)
all_stn_data[stn_id_dict_inv[stn]] = stn_df
return all_stn_data
def fetch_station_attributes(self,
stn,
features_to_fetch,
st=None,
en=None,
) -> pd.DataFrame:
"""fetches attributes of one station"""
# st, en = self._check_length(st, en)
nc = netCDF4.Dataset(os.path.join(self.ds_dir, 'CanSWE-CanEEN_1928-2020_v1.nc'))
stn_df = pd.DataFrame(columns=features_to_fetch)
for var in nc.variables:
if var in features_to_fetch:
ma = np.array(nc[var][:])
ma[ma == nc[var]._FillValue] = np.nan
ta = ma[stn, :]  # target array of one station
s = pd.Series(ta, index=pd.date_range(self.start, self.end, freq='D'), name=var)
stn_df[var] = s[st:en]
nc.close()
return stn_df
class RRLuleaSweden(Datasets):
"""
Rainfall runoff data for an urban catchment from 2016-2019 following the work
of Broekhuizen et al., 2020 [11]_ .
.. [11] https://doi.org/10.5194/hess-24-869-2020
"""
url = "https://zenodo.org/record/3931582"
def __init__(self, path=None, **kwargs):
super().__init__(path=path, **kwargs)
self.ds_dir = path
self._download()
def fetch(
self,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None
):
"""fetches rainfall runoff data
Parameters
----------
st : optional
start of data to be fetched. By default the data starts from
2016-06-16 20:50:00
en : optional
end of data to be fetched. By default the end is 2019-09-15 18:41
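Returns
-------
tuple
a tuple of two pandas DataFrames, the flow data and the precipitation data
Examples
--------
>>> from ai4water.datasets import RRLuleaSweden
>>> dataset = RRLuleaSweden()
>>> flow, pcp = dataset.fetch()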
"""
flow = self.fetch_flow(st,en)
pcp = self.fetch_pcp(st, en)
return flow, pcp
def fetch_flow(
self,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None
)->pd.DataFrame:
"""fetches flow data
Parameters
----------
st : optional
start of data to be fetched. By default the data starts from
2016-06-16 20:50:00
en : optional
end of data to be fetched. By default the end is 2019-09-15 18:35:00
Returns
-------
pd.DataFrame
a dataframe of shape (37_618, 3) where the columns are velocity,
level and flow rate
Examples
--------
>>> from ai4water.datasets import RRLuleaSweden
>>> dataset = RRLuleaSweden()
>>> flow = dataset.fetch_flow()
>>> flow.shape
(37618, 3)
"""
fname = os.path.join(self.ds_dir, "flow_2016_2019.csv")
df = pd.read_csv(fname, sep=";")
df.index = pd.to_datetime(df.pop("time"))
return check_st_en(df, st, en)
def fetch_pcp(
self,
st: Union[str, int, pd.DatetimeIndex] = None,
en: Union[str, int, pd.DatetimeIndex] = None
)->pd.DataFrame:
"""fetches precipitation data
Parameters
----------
st : optional
start of data to be fetched. By default the data starts from
2016-06-16 19:48:00
en : optional
end of data to be fetched. By default the end is 2019-10-26 23:59:00
Returns
-------
pd.DataFrame
a dataframe of shape (967_080, 1)
Examples
--------
>>> from ai4water.datasets import RRLuleaSweden
>>> dataset = RRLuleaSweden()
>>> pcp = dataset.fetch_pcp()
>>> pcp.shape
(967080, 1)
"""
fname = os.path.join(self.ds_dir, "prec_2016_2019.csv")
df = pd.read_csv(fname, sep=";")
df.index = pd.to_datetime(df.pop("time"))
return check_st_en(df, st, en)
class RRAlpineCatchments(Datasets):
"""
Modelled runoff in contrasting Alpine catchments in Austria from 1981 to 2100
using 14 models following the work of Hanus et al., 2021 [12]_ .
The data covers a past period (1981 - 2010) and a future period (up to 2100).
.. [12] https://hess.copernicus.org/preprints/hess-2021-92/
"""
url = "https://zenodo.org/record/4539986"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._download()
class ETPAgroForestGermany(Datasets):
"""
Evapotranspiration over agroforestry sites in Germany
https://doi.org/10.5194/bg-17-5183-2020
SiteName_Landuse_Content_Figures_Tables.csv
"""
url = "https://zenodo.org/record/4038399"
class ETPTelesinaItaly(Datasets):
"""
Daily rain and reference evapotranspiration for three years 2002-2004
"""
url = "https://zenodo.org/record/3726856"
def mg_photodegradation(
inputs: list = None,
target: str = "Efficiency (%)",
encoding:str = None
)->Tuple[pd.DataFrame,
Union[LabelEncoder, OneHotEncoder, Any],
Union[LabelEncoder, OneHotEncoder, Any]]:
"""
This data is about photocatalytic degradation of malachite green dye using
noble metal doped BiFeO3. For further description of this data see
`Jafari et al., 2023 <https://doi.org/10.1016/j.jhazmat.2022.130031>`_ and
for the use of this data for removal efficiency prediction `see <https://github.com/ZeeshanHJ/Photocatalytic_Performance_Prediction>`_ .
This dataset consists of 1200 points collected during ~135 experiments.
Parameters
----------
inputs : list, optional
features to use as input. By default following features are used as input
- ``Catalyst_type``
- ``Surface area``
- ``Pore Volume``
- ``Catalyst_loading (g/L)``
- ``Light_intensity (W)``
- ``time (min)``
- ``solution_pH``
- ``HA (mg/L)``
- ``Anions``
- ``Ci (mg/L)``
- ``Cf (mg/L)``
target : str, optional, default="Efficiency (%)"
feature to use as target. By default ``Efficiency (%)`` is used as target,
which is photodegradation removal efficiency of dye from wastewater. Following
are valid target names
- ``Efficiency (%)``
- ``k_first``
- ``k_2nd``
encoding : str, default=None
type of encoding to use for the two categorical features i.e., ``Catalyst_type``
and ``Anions``, to convert them into numerical values. Available options are ``ohe``,
``le`` and None. If ``ohe`` is selected, the original input columns are replaced
with one hot encoded columns. This will result in 6 columns for Anions and
15 columns for Catalyst_type.
Returns
-------
data : pd.DataFrame
a pandas dataframe consisting of input and output features. The default
setting will result in dataframe shape of (1200, 12)
cat_encoder :
catalyst encoder
an_encoder :
encoder for anions
Examples
--------
>>> from ai4water.datasets import mg_photodegradation
>>> mg_data, catalyst_encoder, anion_encoder = mg_photodegradation()
>>> mg_data.shape
(1200, 12)
... # the default encoding is None, but if we want to use one hot encoder
>>> mg_data_ohe, cat_enc, an_enc = mg_photodegradation(encoding="ohe")
>>> mg_data_ohe.shape
(1200, 31)
>>> cat_enc.inverse_transform(mg_data_ohe.iloc[:, 9:24].values)
>>> an_enc.inverse_transform(mg_data_ohe.iloc[:, 24:30].values)
... # if we want to use label encoder
>>> mg_data_le, cat_enc, an_enc = mg_photodegradation(encoding="le")
>>> mg_data_le.shape
(1200, 12)
>>> cat_enc.inverse_transform(mg_data_le.iloc[:, 9].values.astype(int))
>>> an_enc.inverse_transform(mg_data_le.iloc[:, 10].values.astype(int))
... # By default the target is efficiency but if we want
... # to use first order k as target
>>> mg_data_k, _, _ = mg_photodegradation(target="k_first")
... # if we want to use 2nd order k as target
>>> mg_data_k2, _, _ = mg_photodegradation(target="k_2nd")
"""
df = pd.read_csv(
"https://raw.githubusercontent.com/ZeeshanHJ/Photocatalytic_Performance_Prediction/main/Raw%20data.csv"
)
default_inputs = ['Surface area', 'Pore Volume', 'Catalyst_loading (g/L)',
'Light_intensity (W)', 'time (min)', 'solution_pH', 'HA (mg/L)',
'Ci (mg/L)', 'Cf (mg/L)', 'Catalyst_type', 'Anions',
]
default_targets = ['Efficiency (%)', 'k_first', 'k_2nd']
# first order
df["k_first"] = np.log(df["Ci (mg/L)"] / df["Cf (mg/L)"]) / df["time (min)"]
# k second order
df["k_2nd"] = ((1 / df["Cf (mg/L)"]) - (1 / df["Ci (mg/L)"])) / df["time (min)"]
if inputs is None:
inputs = default_inputs
if target is None:
target = default_targets
elif isinstance(target, str):
target = [target]
assert isinstance(target, list)
assert all(trgt in default_targets for trgt in target)
df = df[inputs + target]
# consider encoding of categorical features
cat_encoder, an_encoder = None, None
if encoding:
df, cols_added, cat_encoder = encode_column(df, "Catalyst_type", encoding)
df, an_added, an_encoder = encode_column(df, "Anions", encoding)
# move the target to the end
for t in target:
df[t] = df.pop(t)
return df, cat_encoder, an_encoder
def gw_punjab(
data_type:str = "full",
country:str = None,
)->pd.DataFrame:
"""
groundwater level (meters below ground level) dataset from Punjab region
(Pakistan and north-west India) following the study of MacAllister_ et al., 2022.
Parameters
----------
data_type : str (default="full")
either ``full`` or ``LTS``. The ``full`` contains the
full dataset, there are 68783 rows of observed groundwater level data from
4028 individual sites. In ``LTS`` there are 7547 rows of groundwater
level observations from 130 individual sites, which have water level data available
for a period of more than 40 years and from which at least two thirds of the
annual observations are available.
country : str (default=None)
the country for which data to retrieve. Either ``PAK`` or ``IND``.
Returns
-------
pd.DataFrame
a pandas DataFrame with datetime index
Examples
---------
>>> from ai4water.datasets import gw_punjab
>>> full_data = gw_punjab()
find out the earliest observation
>>> print(full_data.sort_index().head(1))
>>> full_data.shape
(68782, 4)
>>> lts_data = gw_punjab(data_type="LTS")
>>> df_pak = gw_punjab(country="PAK")
>>> df_pak.sort_index().dropna().head(1)
.. _MacAllister: https://doi.org/10.1038/s41561-022-00926-1
"""
f = 'https://webservices.bgs.ac.uk/accessions/download/167240?fileName=India_Pakistan_WL_NGDC.xlsx'
ds_dir = os.path.join(os.path.dirname(__file__), "data", 'gw_punjab')
if not os.path.exists(ds_dir):
os.makedirs(ds_dir)
fname = os.path.join(ds_dir, "gw_punjab.xlsx")
if not os.path.exists(fname):
print(f"downloading {fname}")
download(f, fname)
assert data_type in ("full", "LTS")
if data_type == "full":
sheet_name = "Full_dataset"
else:
sheet_name = "LTS"
df = pd.read_excel(fname, sheet_name=sheet_name)
if sheet_name == "LTS":
df.iloc[5571, 3] = '01/10/1887'
df.iloc[5572, 3] = '01/10/1892'
df.iloc[6227, 3] = '01/10/1887'
df.iloc[5511, 3] = '01/10/1887'
df.iloc[5512, 3] = '01/10/1892'
df.iloc[6228, 3] = '01/10/1892'
df.index = pd.to_datetime(df.pop("DATE"))
if country:
assert country in ("PAK", "IND"), f"country must be either PAK or IND but it is {country}"
country_stations = [st for st in df['OW_ID'].unique() if st.startswith(country)]
df = df[df['OW_ID'].isin(country_stations)]
return df
def qe_biochar_ec(
input_features:List[str]=None,
encoding:str = None
)->tuple:
"""
data of adsorption capacity for removal of emerging pollutants from wastewater
using biochar. For more description of this data see Jaffari et al., 2023.
Parameters
----------
input_features :
By default following features are used as input
- ``Adsorbent``
- ``Pyrolysis temperature``
- ``Pyrolysis time``
- ``C``
- ``H``
- ``O``
- ``N``
- ``(O+N)/C``
- ``Ash``
- ``H/C``
- ``O/C``
- ``Surface area``
- ``Pore volume``
- ``Average pore size``
- ``Pollutant``
- ``Adsorption time``
- ``Initial concentration``
- ``Solution pH``
- ``RPM``
- ``Volume``
- ``Adsorbent dosage``
- ``Adsorption temperature``
- ``Ion concentration``
- ``Humid acid``
- ``Wastewater type``
- ``Adsorption type``
encoding : str, default=None
the type of encoding to use for categorical features. If not None, it should
be either ``ohe`` or ``le``.
Returns
--------
tuple
Examples
--------
>>> from ai4water.datasets import qe_biochar_ec
>>> data, *_ = qe_biochar_ec()
>>> data.shape
(3757, 27)
>>> data, ads_enc, pol_enc, wwt_enc, adspt_enc = qe_biochar_ec(encoding="le")
>>> data.shape
(3757, 27)
>>> ads_enc.inverse_transform(data.iloc[:, 22].values.astype(int))
>>> pol_enc.inverse_transform(data.iloc[:, 23].values.astype(int))
>>> wwt_enc.inverse_transform(data.iloc[:, 24].values.astype(int))
>>> adspt_enc.inverse_transform(data.iloc[:, 25].values.astype(int))
>>> data, adsp_enc, polt_enc, wwt_enc, adspt_enc = qe_biochar_ec(encoding="ohe")
>>> data.shape
(3757, 58)
>>> adsp_enc.inverse_transform(data.iloc[:, 22:37].values)
>>> polt_enc.inverse_transform(data.iloc[:, 37:51].values)
>>> wwt_enc.inverse_transform(data.iloc[:, 51:55].values)
>>> adspt_enc.inverse_transform(data.iloc[:, 55:-1].values)
"""
fpath = os.path.join(os.path.dirname(__file__), 'qe_biochar_ec.csv')
url = 'https://raw.githubusercontent.com/ZeeshanHJ/Adsorption-capacity-prediction-for-ECs/main/Raw_data.csv'
if os.path.exists(fpath):
data = pd.read_csv(fpath)
else:
data = pd.read_csv(url)
# remove space in 'Pyrolysis temperature '
data['Pyrolysis temperature'] = data.pop('Pyrolysis temperature ')
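# move the categorical features and the target column to the end of the dataframe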
data['Adsorbent'] = data.pop('Adsorbent')
data['Pollutant'] = data.pop('Pollutant')
data['Wastewater type'] = data.pop('Wastewater type')
data['Adsorption type'] = data.pop('Adsorption type')
data['Capacity'] = data.pop('Capacity')
data.to_csv(fpath, index=False)
def_inputs = [
'Pyrolysis temperature',
'Pyrolysis time',
'C',
'H',
'O',
'N',
'(O+N)/C',
'Ash',
'H/C',
'O/C',
'Surface area',
'Pore volume',
'Average pore size',
'Adsorption time',
'Initial concentration',
'Solution pH',
'RPM',
'Volume',
'Adsorbent dosage',
'Adsorption temperature',
'Ion concentration',
'Humic acid',
'Adsorbent',
'Pollutant',
'Wastewater type',
'Adsorption type',
]
if input_features is not None:
assert isinstance(input_features, list)
assert all([feature in def_inputs for feature in input_features])
else:
input_features = def_inputs
data = data[input_features + ['Capacity']]
ads_enc, pol_enc, wwt_enc, adspt_enc = None, None, None, None
if encoding:
data, _, ads_enc = encode_column(data, 'Adsorbent', encoding)
data, _, pol_enc = encode_column(data, 'Pollutant', encoding)
data, _, wwt_enc = encode_column(data, 'Wastewater type', encoding)
data, _, adspt_enc = encode_column(data, 'Adsorption type', encoding)
data['Capacity'] = data.pop('Capacity')
return data, ads_enc, pol_enc, wwt_enc, adspt_enc | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/_datasets.py | _datasets.py |
import glob
import warnings
from subprocess import call
from typing import Union, Tuple
import datetime
try:
from shapely.geometry import shape, mapping
from shapely.ops import unary_union
except (ModuleNotFoundError, OSError):
shape, mapping, unary_union = None, None, None
from ai4water.backend import os, shapefile, xr, np, pd, fiona
from .utils import check_attributes, check_st_en
from ._datasets import Datasets, maybe_download
from ai4water.preprocessing.resample import Resampler
from ai4water.preprocessing.spatial_utils import find_records
class MtropicsLaos(Datasets):
"""
Downloads and prepares hydrological, climate and land use data for Laos from
Mtropics_ website and ird_ data servers.
Methods
-------
- fetch_lu
- fetch_ecoli
- fetch_rain_gauges
- fetch_weather_station_data
- fetch_pcp
- fetch_hydro
- make_regression
.. _Mtropics:
https://mtropics.obs-mip.fr/catalogue-m-tropics/
.. _ird:
https://dataverse.ird.fr/dataset.xhtml?persistentId=doi:10.23708/EWOYNK
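
Examples
--------
A minimal usage sketch; instantiating the class downloads the data if it
is not already present locally.
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> ecoli = laos.fetch_ecoli()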
"""
target = ['Ecoli_mpn100']
url = {
'lu.zip':
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=0f1aea48-2a51-9b42-7688-a774a8f75e7a",
'pcp.zip':
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=3c870a03-324b-140d-7d98-d3585a63e6ec",
'hydro.zip':
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=389bbea0-7279-12c1-63d0-cfc4a77ded87",
'rain_guage.zip':
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=7bc45591-5b9f-a13d-90dc-f2a75b0a15cc",
'weather_station.zip':
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=353d7f00-8d6a-2a34-c0a2-5903c64e800b",
'ecoli_data.csv':
"https://dataverse.ird.fr/api/access/datafile/5435",
"ecoli_dict.csv":
"https://dataverse.ird.fr/api/access/datafile/5436",
"soilmap.zip":
"https://dataverse.ird.fr/api/access/datafile/5430",
"subs1.zip":
"https://dataverse.ird.fr/api/access/datafile/5432",
"suro.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=f06cb605-7e59-4ba4-8faf-1beee35d2162",
"surf_feat.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=72d9e532-8910-48d2-b9a2-6c8b0241825b",
"ecoli_source.csv":
"https://dataverse.ird.fr/api/access/datafile/37737",
"ecoli_source_readme.txt":
"https://dataverse.ird.fr/api/access/datafile/37736",
"ecoli_suro_gw.csv":
"https://dataverse.ird.fr/api/access/datafile/37735",
"ecoli_suro_gw_readme.txt":
"https://dataverse.ird.fr/api/access/datafile/37734"
}
physio_chem_features = {
"T_deg": "T",
"EC_s/cm": "EC",
"DO_percent": "DOpercent",
"DO_mgl": "DO",
"pH": "pH",
"ORP_mV": "ORP", # stream water oxidation-reduction potential
"Turbidity_NTU": "Turbidity",
"TSS_gL": "TSS",
}
weather_station_data = ['air_temp', 'rel_hum', 'wind_speed', 'sol_rad']
inputs = weather_station_data + ['water_level', 'pcp', 'susp_pm', "Ecoli_source"]
def __init__(
self,
path=None,
save_as_nc:bool = True,
convert_to_csv:bool = False,
**kwargs):
if xr is None:
raise ModuleNotFoundError(
"xarray must be installed to use datasets sub-module")
super().__init__(path=path, **kwargs)
self.save_as_nc = save_as_nc
self.ds_dir = path
self.convert_to_csv = convert_to_csv
self._download()
# we need to pre-process the land use shapefiles
in_dir = os.path.join(self.ds_dir, 'lu')
out_dir = os.path.join(self.ds_dir, 'lu1')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
files = glob.glob(f'{in_dir}/*.shp')
for fpath in files:
f = os.path.basename(fpath)
shp_file = os.path.join(in_dir, f)
op = os.path.join(out_dir, f)
_process_laos_shpfiles(shp_file, op)
def surface_features(
self,
st: Union[str, int, pd.Timestamp] = '2000-10-14',
en: Union[str, int, pd.Timestamp] = '2016-11-12',
)->pd.DataFrame:
"""soil surface features data"""
fname = os.path.join(
self.ds_dir, "surf_feat", "SEDOO_EdS_Houay Pano.xlsx")
df = pd.read_excel(fname, sheet_name="Soil surface features")
df.index = pd.to_datetime(df.pop('Date'))
if st:
if isinstance(st, int):
assert isinstance(en, int), "if st is an integer, en must also be an integer"
df = df.iloc[st:en]
else:
df = df.loc[st:en]
return df
def fetch_suro(
self,
)->pd.DataFrame:
"""returns surface runoff and soil detachment data from Houay pano,
Laos PDR.
Returns
-------
pd.DataFrame
a dataframe of shape (293, 13)
Examples
--------
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> suro = laos.fetch_suro()
"""
fname = os.path.join(
self.ds_dir, 'suro', 'SEDOO_Runoff_Detachment_Houay Pano.xlsx')
df = pd.read_excel(fname, sheet_name="Surface runoff soil detachment")
return df.dropna()
def fetch_lu(self, processed=False):
"""returns landuse_ data as list of shapefiles.
.. _landuse:
https://doi.org/10.1038/s41598-017-04385-2"""
lu_dir = os.path.join(self.ds_dir, f"{'lu1' if processed else 'lu'}")
files = glob.glob(f'{lu_dir}/*.shp')
return files
def fetch_physiochem(
self,
features: Union[list, str] = 'all',
st: Union[str, pd.Timestamp] = '20110525 10:00:00',
en: Union[str, pd.Timestamp] = '20210406 15:05:00',
) -> pd.DataFrame:
"""
Fetches physio-chemical features of Huoy Pano catchment Laos.
Parameters
----------
st :
start of data.
en :
end of data.
features :
The physio-chemical features to fetch. Following features
are available
- ``T``
- ``EC``
- ``DOpercent``
- ``DO``
- ``pH``
- ``ORP``
- ``Turbidity``
- ``TSS``
Returns
-------
a pandas dataframe
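
Examples
--------
An illustrative sketch using the default arguments:
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> phy_chem = laos.fetch_physiochem()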
"""
if isinstance(features, list):
_features = []
for f in features:
_features.append(self.physio_chem_features[f])
else:
assert isinstance(features, str)
if features == 'all':
_features = features
else:
_features = self.physio_chem_features[features]
features = check_attributes(_features, list(self.physio_chem_features.values()))
fname = os.path.join(self.ds_dir, 'ecoli_data.csv')
df = pd.read_csv(fname, sep='\t')
df.index = pd.to_datetime(df['Date_Time'])
df = df[features]
col_names = {v: k for k, v in self.physio_chem_features.items() if v in features}
df = df.rename(columns=col_names)
return df.loc[st:en]
def fetch_ecoli(
self,
features: Union[list, str] = 'Ecoli_mpn100',
st: Union[str, pd.Timestamp] = '20110525 10:00:00',
en: Union[str, pd.Timestamp] = '20210406 15:05:00',
remove_duplicates: bool = True,
) -> pd.DataFrame:
"""
Fetches E. coli data collected at the outlet. See Ribolzi_ et al., 2021
and Boithias_ et al., 2021 for reference.
NaNs represent missing values. The data is randomly sampled between 2011
and 2021 during rainfall events. In total, 368 E. coli observations are
available.
Parameters
----------
st :
start of data. By default the data is fetched from the point it
is available.
en :
end of data. By default the data is fetched til the point it is
available.
features :
E. coli concentration data. Following data are available
- Ecoli_LL_mpn100: Lower limit of the confidence interval
- Ecoli_mpn100: Stream water Escherichia coli concentration
- Ecoli_UL_mpn100: Upper limit of the confidence interval
remove_duplicates :
whether to remove duplicates or not. This is because
some values were recorded within the same minute.
Returns
-------
a pandas dataframe consisting of features as columns.
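
Examples
--------
An illustrative sketch; by default only ``Ecoli_mpn100`` is returned:
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> ecoli = laos.fetch_ecoli()
... # fetch all three E. coli columns
>>> ecoli_all = laos.fetch_ecoli(features='all')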
.. _Ribolzi:
https://dataverse.ird.fr/dataset.xhtml?persistentId=doi:10.23708/EWOYNK
.. _Boithias:
https://doi.org/10.1002/hyp.14126
"""
fname = os.path.join(self.ds_dir, 'ecoli_data.csv')
df = pd.read_csv(fname, sep='\t')
df.index = pd.to_datetime(df['Date_Time'])
available_features = {
# Lower limit of the confidence interval
"Ecoli_LL_mpn100": "E-coli_4dilutions_95%-CI-LL",
# Stream water Escherichia coli concentration
"Ecoli_mpn100": "E-coli_4dilutions",
# Upper limit of the confidence interval
"Ecoli_UL_mpn100": "E-coli_4dilutions_95%-CI-UL"
}
if isinstance(features, list):
_features = []
for f in features:
_features.append(available_features[f])
else:
assert isinstance(features, str)
if features == 'all':
_features = features
else:
_features = available_features[features]
features = check_attributes(_features, list(available_features.values()))
if remove_duplicates:
df = df[~df.index.duplicated(keep='first')]
df = df.sort_index()
df = df[features]
col_names = {v: k for k, v in available_features.items() if v in features}
df = df.rename(columns=col_names)
return df.loc[st:en]
def fetch_rain_gauges(
self,
st: Union[str, pd.Timestamp] = "20010101",
en: Union[str, pd.Timestamp] = "20191231",
) -> pd.DataFrame:
"""
fetches data from 7 rain gauges_ which is collected at daily time step
from 2001 to 2019.
Parameters
----------
st :
start of data. By default the data is fetched from the point it
is available.
en :
end of data. By default the data is fetched til the point it is
available.
Returns
-------
a dataframe of 7 columns, where each column represents observations of
one rain gauge. The length of the dataframe depends upon the range defined
by the `st` and `en` arguments.
Examples
--------
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> rg = laos.fetch_rain_gauges()
.. _gauges:
https://doi.org/10.1038/s41598-017-04385-2
"""
# todo, does nan means 0 rainfall?
fname = os.path.join(self.ds_dir, 'rain_guage', 'rain_guage.nc')
if not os.path.exists(fname) or not self.save_as_nc:
df = self._load_rain_gauge_from_xl_files()
else: # netCDF file already exists so load from it
try:
df = xr.load_dataset(fname).to_dataframe()
except AttributeError:
df = self._load_rain_gauge_from_xl_files()
df.index = pd.date_range('20010101', periods=len(df), freq='D')
return df[st:en]
def _load_rain_gauge_from_xl_files(self):
fname = os.path.join(self.ds_dir, 'rain_guage', 'rain_guage.nc')
files = glob.glob(f"{os.path.join(self.ds_dir, 'rain_guage')}/*.xlsx")
dfs = []
for f in files:
df = pd.read_excel(
f, sheet_name='Daily',
usecols=['R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7'],
keep_default_na=False)
if os.path.basename(f) in ['OMPrawdataLaos2014.xlsx']:
df = pd.read_excel(
f, sheet_name='Daily',
usecols=['R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7'],
keep_default_na=False, nrows=366)
df = df.dropna()
dfs.append(df)
df = pd.concat(dfs)
for col in df.columns:
df[col] = pd.to_numeric(df[col])
df = df.reset_index(drop=True) # index is of type Int64Index
if self.save_as_nc:
df.to_xarray().to_netcdf(fname)
return df
def fetch_weather_station_data(
self,
st: Union[str, pd.Timestamp] = "20010101 01:00:00",
en: Union[str, pd.Timestamp] = "20200101 00:00:00",
freq: str = 'H'
) -> pd.DataFrame:
"""
fetches hourly weather [1]_ station data which consists of air temperature,
humidity, wind speed and solar radiation.
Parameters
----------
st :
start of data to be feteched.
en :
end of data to be fetched.
freq :
frequency at which the data is to be fetched.
Returns
-------
a pandas dataframe consisting of 4 columns
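
Examples
--------
An illustrative sketch using the default arguments:
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> weather = laos.fetch_weather_station_data()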
.. [1]
https://doi.org/10.1038/s41598-017-04385-2
"""
nc_fname = os.path.join(
self.ds_dir, 'weather_station', 'weather_stations.nc')
if not os.path.exists(nc_fname) or not self.save_as_nc:
df = self._load_weather_stn_from_xl_files()
else: # netCDF file already exists so load from it
try:
df = xr.load_dataset(nc_fname).to_dataframe()
except AttributeError:
df = self._load_weather_stn_from_xl_files()
df.index = pd.to_datetime(df.pop('datetime'))
df.columns = self.weather_station_data
df = df.asfreq('H')
df = df.interpolate()
df = df.bfill()
return check_st_en(df, st, en)
def _load_weather_stn_from_xl_files(self):
nc_fname = os.path.join(
self.ds_dir, 'weather_station', 'weather_stations.nc')
files = glob.glob(
f"{os.path.join(self.ds_dir, 'weather_station')}/*.xlsx")
vbsfile = os.path.join(
self.ds_dir, "weather_station", 'ExcelToCsv.vbs')
create_vbs_script(vbsfile)
dataframes = []
for xlsx_file in files:
if not xlsx_file.startswith("~"):
if os.name == "nt":
data_dir = os.path.join(self.ds_dir, "weather_station")
df = to_csv_and_read(
xlsx_file,
data_dir,
sheet_id='2',
usecols=['Date', 'Time', 'T', 'H', 'W', 'Gr'],
parse_dates={'datetime': ['Date', 'Time']})
else:
df = pd.read_excel(xlsx_file,
sheet_name='Hourly',
usecols=['Date', 'T', 'H', 'W', 'Gr'],
parse_dates={'datetime': ['Date']},
keep_default_na=False)
df['datetime'] = pd.to_datetime(df['datetime'], errors='coerce')
df = df.dropna(how="all")
df.index = pd.to_datetime(df.pop('datetime'))
dataframes.append(df)
df = pd.concat(dataframes)
del dataframes
# non-numeric dtype causes problems in converting/saving netcdf
for col in df.columns:
df[col] = pd.to_numeric(df[col])
df = df.reset_index() # index is of type Int64Index
if self.save_as_nc:
df.to_xarray().to_netcdf(nc_fname)
return df
def fetch_pcp(self,
st: Union[str, pd.Timestamp] = '20010101 00:06:00',
en: Union[str, pd.Timestamp] = '20200101 00:06:00',
freq: str = '6min'
) -> pd.DataFrame:
"""
Fetches the precipitation_ data which is collected at 6 minutes time-step
from 2001 to 2020.
Parameters
----------
st :
starting point of data to be fetched.
en :
end point of data to be fetched.
freq :
frequency at which the data is to be returned.
Returns
-------
pandas dataframe of precipitation data
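
Examples
--------
An illustrative sketch using the default arguments:
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> pcp = laos.fetch_pcp()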
.. _precipitation:
https://doi.org/10.1038/s41598-017-04385-2
"""
# todo allow change in frequency
fname = os.path.join(self.ds_dir, 'pcp', 'pcp.nc')
# netCDF file does not exist
if not os.path.exists(fname) or not self.save_as_nc:
df = self._load_pcp_from_excel_files()
else: # nc file already exists so load from it
try:
df = xr.load_dataset(fname).to_dataframe()
# on linux, it is giving error
except AttributeError: # 'EntryPoints' object has no attribute 'get'
df = self._load_pcp_from_excel_files()
df.index = pd.date_range('20010101 00:06:00', periods=len(df), freq='6min')
df.columns = ['pcp']
return df[st:en]
def _load_pcp_from_excel_files(self):
fname = os.path.join(self.ds_dir, 'pcp', 'pcp.nc')
files = glob.glob(f"{os.path.join(self.ds_dir, 'pcp')}/*.xlsx")
df = pd.DataFrame()
for f in files:
_df = pd.read_excel(f, sheet_name='6mn', usecols=['Rfa'])
df = pd.concat([df, _df])
df = df.reset_index(drop=True)
if self.save_as_nc:
df.to_xarray().to_netcdf(fname)
return df
def fetch_hydro(
self,
st: Union[str, pd.Timestamp] = '20010101 00:06:00',
en: Union[str, pd.Timestamp] = '20200101 00:06:00',
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
fetches water level (cm) and suspended particulate matter (g L-1). Both
data are from 2001 to 2019 but are randomly sampled.
Parameters
----------
st : optional
starting point of data to be fetched.
en : optional
end point of data to be fetched.
Returns
-------
a tuple of pandas dataframes of water level and suspended particulate
matter.
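
Examples
--------
An illustrative sketch using the default arguments:
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> wl, spm = laos.fetch_hydro()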
"""
wl_fname = os.path.join(self.ds_dir, 'hydro', 'wl.nc')
spm_fname = os.path.join(self.ds_dir, 'hydro', 'spm.nc')
if not os.path.exists(wl_fname) or not self.save_as_nc:
wl, spm = self._load_hydro_from_xl_files()
else:
try:
wl = xr.load_dataset(wl_fname).to_dataframe()
spm = xr.load_dataset(spm_fname).to_dataframe()
except AttributeError:
wl, spm = self._load_hydro_from_xl_files()
wl = wl[~wl.index.duplicated(keep='first')]
spm = spm[~spm.index.duplicated(keep='first')]
# FutureWarning: Value based partial slicing on non-monotonic
# DatetimeIndexes
return wl.loc[st:en], spm.loc[st:en]
def _load_hydro_from_xl_files(self):
"""
The excel files are formatted inconsistently from year to year, which is
why the parsing below needs several file-specific corrections.
"""
wl_fname = os.path.join(self.ds_dir, 'hydro', 'wl.nc')
spm_fname = os.path.join(self.ds_dir, 'hydro', 'spm.nc')
print("reading data from xlsx files and saving them in netcdf format.")
print("This will happen only once but will save io time.")
files = glob.glob(f"{os.path.join(self.ds_dir, 'hydro')}/*.xlsx")
wls = []
spms = []
for f in files:
_df = pd.read_excel(f, sheet_name='Aperiodic')
_wl = _df[['Date', 'Time', 'RWL04']]
correct_time(_wl, 'Time')
if os.path.basename(f) in ["OMPrawdataLaos2005.xlsx", "OMPrawdataLaos2001.xlsx",
"OMPrawdataLaos2006.xlsx",
"OMPrawdataLaos2012.xlsx",
"OMPrawdataLaos2013.xlsx",
"OMPrawdataLaos2014.xlsx"]:
_wl = _wl.iloc[0:-1]
if os.path.basename(f) in ["OMPrawdataLaos2011.xlsx"]:
_wl = _wl.iloc[0:-1]
if os.path.basename(f) in ["OMPrawdataLaos2008.xlsx"]:
_wl = _wl.dropna()
if os.path.basename(f) in ["OMPrawdataLaos2009.xlsx",
"OMPrawdataLaos2010.xlsx",
"OMPrawdataLaos2011.xlsx",
"OMPrawdataLaos2015.xlsx",
"OMPrawdataLaos2016.xlsx",
"OMPrawdataLaos2017.xlsx",
"OMPrawdataLaos2018.xlsx",
"OMPrawdataLaos2019.xlsx",
]:
_wl = _wl.dropna()
index = _wl['Date'].astype(str) + ' ' + _wl['Time'].astype(str)
_wl.index = pd.to_datetime(index)
_spm = _df[['Date.1', 'Time.1', 'SPM04']]
correct_time(_spm, 'Time.1')
_spm = _spm.dropna()
_spm = _spm.iloc[_spm.first_valid_index():_spm.last_valid_index()]
if os.path.basename(f) == 'OMPrawdataLaos2016.xlsx':
_spm.iloc[166] = ['2016-07-01', '20:43:47', 1.69388]
_spm.iloc[247] = ['2016-07-23', '12:57:47', 8.15714]
_spm.iloc[248] = ['2016-07-23', '17:56:47', 0.5]
_spm.iloc[352] = ['2016-08-16', '03:08:17', 1.12711864406]
if os.path.basename(f) == 'OMPrawdataLaos2017.xlsx':
_spm.index = pd.to_datetime(_spm['Date.1'].astype(str))
else:
index = _spm['Date.1'].astype(str) + ' ' + _spm['Time.1'].astype(str)
_spm.index = pd.to_datetime(index)
wls.append(_wl['RWL04'])
spms.append(_spm['SPM04'])
wl = pd.DataFrame(pd.concat(wls))
spm = pd.DataFrame(pd.concat(spms))
wl.columns = ['water_level']
spm.columns = ['susp_pm']
if self.save_as_nc:
try:
wl.to_xarray().to_netcdf(wl_fname)
except (ValueError, AttributeError):
if os.path.exists(wl_fname):
os.remove(wl_fname)
try:
spm.to_xarray().to_netcdf(spm_fname)
except (ValueError, AttributeError):
if os.path.exists(spm_fname):
os.remove(spm_fname)
return wl, spm
def make_classification(
self,
input_features: Union[None, list] = None,
output_features: Union[str, list] = None,
st: Union[None, str] = "20110525 14:00:00",
en: Union[None, str] = "20181027 00:00:00",
freq: str = "6min",
threshold: Union[int, dict] = 400,
lookback_steps: int = None,
) -> pd.DataFrame:
"""
Returns data for a classification problem.
Parameters
----------
input_features :
names of inputs to use.
output_features :
feature/features to consider as target/output/label
st :
starting date of data. The default starting date is 20110525
en :
end date of data
freq :
frequency of data
threshold :
threshold to use to determine classes. Values greater than
equal to threshold are set to 1 while values smaller than threshold
are set to 0. The value of 400 is chosen for E. coli to keep the
number of 0s and 1s balanced. It should be noted that US-EPA recommends
a threshold value of 400 cfu per 100 mL.
lookback_steps:
the number of previous steps to use. If this argument is used,
the resultant dataframe will have (ecoli_observations * lookback_steps)
rows. The resulting index will not be continuous.
Returns
-------
pd.DataFrame
a dataframe of shape `(inputs+target, st:en)`
Example
-------
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> df = laos.make_classification()
"""
thresholds = {
'Ecoli_mpn100': 400
}
target: list = check_attributes(output_features, self.target)
data = self._make_ml_problem(input_features, target, st, en, freq)
if len(target) == 1:
threshold = threshold or thresholds[target[0]]
else:
raise ValueError
s = data[target[0]]
s[s < threshold] = 0
s[s >= threshold] = 1
data[target[0]] = s
if lookback_steps:
return consider_lookback(data, lookback_steps, target)
return data
def make_regression(
self,
input_features: Union[None, list] = None,
output_features: Union[str, list] = "Ecoli_mpn100",
st: Union[None, str] = "20110525 14:00:00",
en: Union[None, str] = "20181027 00:00:00",
freq: str = "6min",
lookback_steps: int = None,
replace_zeros_in_target:bool=True,
) -> pd.DataFrame:
"""
Returns data for a regression problem using hydrological, environmental,
and water quality data of Houay Pano.
Parameters
----------
input_features :
names of inputs to use. By default following features
are used as input
- ``air_temp``
- ``rel_hum``
- ``wind_speed``
- ``sol_rad``
- ``water_level``
- ``pcp``
- ``susp_pm``
- ``Ecoli_source``
output_features : feature/features to consider as target/output/label
st :
starting date of data
en :
end date of data
freq : frequency of data
lookback_steps : int, default=None
the number of previous steps to use. If this argument is used,
the resultant dataframe will have (ecoli_observations * lookback_steps)
rows. The resulting index will not be continuous.
replace_zeros_in_target : bool, default=True
Replace the zeroes in target column with 1s.
Returns
-------
pd.DataFrame
a dataframe of shape (inputs+target, st - en)
Example
-------
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> ins = ['pcp', 'air_temp']
>>> out = ['Ecoli_mpn100']
>>> reg_data = laos.make_regression(ins, out, '20110101', '20181231')
todo add HRU definition
"""
data = self._make_ml_problem(
input_features, output_features, st, en, freq,
replace_zeros_in_target=replace_zeros_in_target)
if lookback_steps:
return consider_lookback(data, lookback_steps, output_features)
return data
def _make_ml_problem(
self, input_features, output_features, st, en, freq,
replace_zeros_in_target:bool = True
):
inputs = check_attributes(input_features, self.inputs)
target = check_attributes(output_features, self.target)
features_to_fetch = inputs + target
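# all data sources are brought to a common time step (``freq``) below before
# being concatenated into a single dataframe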
pcp = self.fetch_pcp(st=st, en=en)
pcp = pcp.interpolate('linear', limit=5)
pcp = pcp.fillna(0.0)
w = self.fetch_weather_station_data(st=st, en=en)
assert int(w.isna().sum().sum()) == 0, f"{int(w.isna().sum().sum())}"
w.columns = ['air_temp', 'rel_hum', 'wind_speed', 'sol_rad']
w_6min = Resampler(w,
freq=freq,
how={'air_temp': 'linear',
'rel_hum': 'linear',
'wind_speed': 'linear',
'sol_rad': 'linear'
}
)()
ecoli = self.fetch_ecoli(st=st, en=en)
ecoli = ecoli.dropna()
ecoli_6min = ecoli.resample(freq).mean()
if replace_zeros_in_target:
ecoli_6min.loc[ecoli_6min['Ecoli_mpn100']==0.0] = 1.0
wl, spm = self.fetch_hydro(st=st, en=en)
wl_6min = wl.resample(freq).first().interpolate(method="linear")
spm_6min = spm.resample(freq).first().interpolate(method='linear')
# backfilling because for each month the value is given for last day of month
src = self.fetch_source().loc[:, 'NB_E. coli_total'].asfreq("6min").bfill()
src.name = "Ecoli_source"
data = pd.concat([w_6min.loc[st:en],
pcp.loc[st:en],
wl_6min.loc[st:en],
spm_6min.loc[st:en],
src[st:en],
ecoli_6min.loc[st:en],
], axis=1)
if data['water_level'].isna().sum() < 15:
data['water_level'] = data['water_level'].bfill() # only 11 nan present at start
data['water_level'] = data['water_level'].ffill() # only 1 nan is present at end
if data['susp_pm'].isna().sum() < 40:
data['susp_pm'] = data['susp_pm'].bfill() # only 26 nans are present at end
data['susp_pm'] = data['susp_pm'].ffill() # only 9 nans are present
return data.loc[st:en, features_to_fetch]
def fetch_source(
self
)->pd.DataFrame:
"""
returns monthly source data for E. coli from 2001 to 2021, obtained from
`here <https://dataverse.ird.fr/dataset.xhtml?persistentId=doi:10.23708/7XJ3TB>`_
Returns
--------
pd.DataFrame of shape (252, 19)
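
Examples
--------
An illustrative sketch:
>>> from ai4water.datasets import MtropicsLaos
>>> laos = MtropicsLaos()
>>> source = laos.fetch_source()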
"""
fname = os.path.join(self.ds_dir, "ecoli_source.csv")
df = pd.read_csv(fname, sep="\t")
df.index = pd.date_range("20010101", "20211231", freq="M")
df.pop('Time')
df.index.freq = pd.infer_freq(df.index)
return df
class MtropcsThailand(Datasets):
url = {
"pcp.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=27c65b5f-59cb-87c1-4fdf-628e6143d8c4",
# "hydro.zip":
#"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=9e6f7144-8984-23bd-741a-06378fabd72",
"rain_gauge.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=0a12ffcf-42bc-0289-1c55-a769ef19bb16",
"weather_station.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=fa0bca5f-caee-5c68-fed7-544fe121dcf5 "
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._download()
class MtropicsVietnam(Datasets):
url = {
"pcp.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=d74ab1b0-379b-71cc-443b-662a73b7f596",
"hydro.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=85fb6717-4095-a2a2-34b5-4f1b70cfd304",
# "lu.zip":
#"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=c3724992-a043-4bbf-8ac1-bc6f9a608c1c",
"rain_guage.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=3d3382d5-08c1-2595-190b-8568a1d2d6af",
"weather_station.zip":
"https://services.sedoo.fr/mtropics/data/v1_0/download?collectionId=8df40086-4232-d8d0-a1ed-56c860818989"
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._download()
def _process_laos_shpfiles(shape_file, out_path):
if fiona is None:
warnings.warn("preprocessing of shapefiles can not be done because no fiona installation is found.")
return
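# group the polygons by land-use class (the 'LU3' attribute) and dissolve each
# class into a single (multi)polygon before writing them to a new shapefile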
shp_reader = shapefile.Reader(shape_file)
container = {
'Forest': [],
'Culture': [],
'Fallow': [],
'Teak': [],
# 'others': []
}
for i in range(shp_reader.numRecords):
lu = find_records(shape_file, 'LU3', i)
shp = shp_reader.shape(i)
if shp.shapeType == 0:
continue
geom = shape(shp.__geo_interface__)
if lu.startswith('Forest'):
container['Forest'].append(geom)
elif lu.startswith('Culture'):
container['Culture'].append(geom)
elif lu.startswith('Fallow'):
container['Fallow'].append(geom)
elif lu.startswith('Teak'):
container['Teak'].append(geom)
else: # just consider all others as 'Culture' for simplicity
container['Culture'].append(geom)
# container['others'].append(geom)
# Define a polygon feature geometry with one attribute
schema = {
'geometry': 'Polygon' if os.path.basename(shape_file) in [
'LU2000.shp', 'LU2001.shp'] else 'MultiPolygon',
'properties': {'id': 'int',
'NAME': 'str',
'area': 'float'},
}
# Write a new Shapefile
with fiona.open(out_path, 'w', 'ESRI Shapefile', schema) as c:
for idx, lu in enumerate(list(container.keys())):
geoms = container[lu]
poly = unary_union([shape(s.__geo_interface__) for s in geoms])
assert poly.is_valid
c.write({
'geometry': mapping(poly),
'properties': {'id': idx,
'NAME': lu,
'area': poly.area},
})
def consider_lookback(df:pd.DataFrame, lookback:int, col_name:str)->pd.DataFrame:
"""selects rows from dataframe considering lookback based upon nan
values in col_name"""
if isinstance(col_name, list):
assert len(col_name) == 1
col_name = col_name[0]
if not isinstance(col_name, str):
raise NotImplementedError
start = False
steps = 0
masks = np.full(len(df), False)
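# iterate over the target column backwards so that each non-NaN observation
# and the rows immediately preceding it (lookback rows in total) are marked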
for idx, ecoli in enumerate(df[col_name].values[::-1]):
if ecoli == ecoli: # i.e. the value is not NaN
start = True
steps = 0
if start and steps < lookback:
masks[idx] = True
steps += 1
# if we have started counting but the lookback limit has been reached
if start and steps > lookback:
start = False
return df.iloc[masks[::-1]]
def ecoli_mekong(
st: Union[str, pd.Timestamp, int] = "20110101",
en: Union[str, pd.Timestamp, int] = "20211231",
features:Union[str, list] = None,
overwrite=False
)->pd.DataFrame:
"""
E. coli data from the Mekong river (Houay Pano) area from 2011 to 2021,
following Boithias et al., 2022 [1]_.
Parameters
----------
st : optional
starting time. The default starting point is 2011-05-25 10:00:00
en : optional
end time, The default end point is 2021-05-25 15:41:00
features : str, optional
names of features to use. use ``all`` to get all features. By default
following input features are selected
- ``station_name`` name of station/catchment where the observation was made
- ``T`` temperature
- ``EC`` electrical conductance
- ``DOpercent`` dissolved oxygen saturation (%)
- ``DO`` dissolved oxygen concentration (mg/L)
- ``pH`` pH
- ``ORP`` oxidation-reduction potential
- ``Turbidity`` turbidity
- ``TSS`` total suspended sediment concentration
- ``E-coli_4dilutions`` Escherichia coli concentration
overwrite : bool
whether to overwrite the downloaded file or not
Returns
-------
pd.DataFrame
with default parameters, the shape is (1602, 10)
Examples
--------
>>> from ai4water.datasets import ecoli_mekong
>>> ecoli_data = ecoli_mekong()
>>> ecoli_data.shape
(1602, 10)
.. [1]
https://essd.copernicus.org/preprints/essd-2021-440/
"""
ecoli = ecoli_houay_pano(st, en, features, overwrite=overwrite)
ecoli1 = ecoli_mekong_2016(st, en, features, overwrite=overwrite)
ecoli2 = ecoli_mekong_laos(st, en, features, overwrite=overwrite)
return pd.concat([ecoli, ecoli1, ecoli2])
def ecoli_mekong_2016(
st: Union[str, pd.Timestamp, int] = "20160101",
en: Union[str, pd.Timestamp, int] = "20161231",
features:Union[str, list] = None,
overwrite=False
)->pd.DataFrame:
"""
E. coli data from Mekong river from 2016 from 29 catchments
Parameters
----------
st :
starting time
en :
end time
features : str, optional
names of features to use. use ``all`` to get all features.
overwrite : bool
whether to overwrite the downloaded file or not
Returns
-------
pd.DataFrame
with default parameters, the shape is (58, 10)
Examples
--------
>>> from ai4water.datasets import ecoli_mekong_2016
>>> ecoli = ecoli_mekong_2016()
>>> ecoli.shape
(58, 10)
.. _url:
https://dataverse.ird.fr/dataset.xhtml?persistentId=doi:10.23708/ZRSBM4
"""
url = {"ecoli_mekong_2016.csv": "https://dataverse.ird.fr/api/access/datafile/8852"}
ds_dir = os.path.join(os.path.dirname(__file__), 'data', 'ecoli_mekong_2016')
return _fetch_ecoli(ds_dir, overwrite, url, None, features, st, en,
"ecoli_houay_pano_tab_file")
def ecoli_houay_pano(
st: Union[str, pd.Timestamp, int] = "20110101",
en: Union[str, pd.Timestamp, int] = "20211231",
features:Union[str, list] = None,
overwrite=False
)->pd.DataFrame:
"""
E. coli data from Mekong river (Houay Pano) area.
Parameters
----------
st : optional
starting time. The default starting point is 2011-05-25 10:00:00
en : optional
end time, The default end point is 2021-05-25 15:41:00
features : str, optional
names of features to use. use ``all`` to get all features. By default
following input features are selected
``station_name`` name of station/catchment where the observation was made
``T`` temperature
``EC`` electrical conductance
``DOpercent`` dissolved oxygen saturation (%)
``DO`` dissolved oxygen concentration (mg/L)
``pH`` pH
``ORP`` oxidation-reduction potential
``Turbidity`` turbidity
``TSS`` total suspended sediment concentration
``E-coli_4dilutions`` Escherichia coli concentration
overwrite : bool
whether to overwrite the downloaded file or not
Returns
-------
pd.DataFrame
with default parameters, the shape is (413, 10)
Examples
--------
>>> from ai4water.datasets import ecoli_houay_pano
>>> ecoli = ecoli_houay_pano()
>>> ecoli.shape
(413, 10)
.. _url:
https://dataverse.ird.fr/dataset.xhtml?persistentId=doi:10.23708/EWOYNK
"""
url = {"ecoli_houay_pano_file.csv": "https://dataverse.ird.fr/api/access/datafile/9230"}
ds_dir = os.path.join(os.path.dirname(__file__), 'data', 'ecoli_houay_pano')
return _fetch_ecoli(ds_dir, overwrite, url, None, features, st, en,
"ecoli_houay_pano_tab_file")
def ecoli_mekong_laos(
st: Union[str, pd.Timestamp, int] = "20110101",
en: Union[str, pd.Timestamp, int] = "20211231",
features:Union[str, list] = None,
station_name:str = None,
overwrite=False
)->pd.DataFrame:
"""
E. coli data from Mekong river (Northern Laos).
Parameters
----------
st :
starting time
en :
end time
station_name : str
name of the station/river for which the data is to be fetched. If None,
data of all stations is returned.
features : str, optional
names of features to use. use ``all`` to get all features.
overwrite : bool
whether to overwrite the downloaded file or not
Returns
-------
pd.DataFrame
with default parameters, the shape is (1131, 10)
Examples
--------
>>> from ai4water.datasets import ecoli_mekong_laos
>>> ecoli = ecoli_mekong_laos()
>>> ecoli.shape
(1131, 10)
.. _url:
https://dataverse.ird.fr/file.xhtml?fileId=9229&version=3.0
"""
url = {"ecoli_mekong_loas_file.csv": "https://dataverse.ird.fr/api/access/datafile/9229"}
ds_dir = os.path.join(os.path.dirname(__file__), 'data', 'ecoli_mekong_loas')
return _fetch_ecoli(ds_dir, overwrite, url, station_name, features, st, en,
"ecoli_mekong_laos_tab_file")
def _fetch_ecoli(ds_dir, overwrite, url, station_name, features, st, en, _name):
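# download the file if needed, read the tab-separated data, optionally filter by
# station name and by the st/en period, and rename 'River' to 'station_name'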
maybe_download(ds_dir, overwrite=overwrite, url=url, name=_name)
all_files = os.listdir(ds_dir)
assert len(all_files)==1
fname = os.path.join(ds_dir, all_files[0])
df = pd.read_csv(fname, sep='\t')
df.index = pd.to_datetime(df['Date_Time'])
if station_name is not None:
assert station_name in df['River'].unique().tolist()
df = df.loc[df['River']==station_name]
if features is None:
features = ['River', 'T', 'EC', 'DOpercent', 'DO', 'pH', 'ORP', 'Turbidity',
'TSS', 'E-coli_4dilutions']
features = check_attributes(features, df.columns.tolist())
df = df[features]
# River is not a representative name
df = df.rename(columns={"River": "station_name"})
if st:
if isinstance(st, int):
assert isinstance(en, int), "if st is an integer, en must also be an integer"
df = df.iloc[st:en]
else:
df = df.loc[st:en]
return df
def to_csv_and_read(
xlsx_file:str,
data_dir:str,
sheet_id:str,
**read_csv_kwargs
)->pd.DataFrame:
"""converts the xlsx file to csv and reads it to dataframe."""
vbsfile = os.path.join(data_dir, 'ExcelToCsv.vbs')
create_vbs_script(vbsfile)
assert xlsx_file.endswith(".xlsx")
fname = os.path.basename(xlsx_file).split('.')[0]
#if not fname.startswith("~"):
csv_fpath = os.path.join(data_dir, f"{fname}.csv")
if not os.path.exists(csv_fpath):
call(['cscript.exe', vbsfile, xlsx_file, csv_fpath, sheet_id])
return pd.read_csv(csv_fpath, **read_csv_kwargs)
def create_vbs_script(vbsfile):
f = open(vbsfile, 'wb')
f.write(vbscript.encode('utf-8'))
f.close()
return
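# VBScript (written to disk by create_vbs_script) which drives Excel via COM to
# save a given worksheet of an xlsx workbook as csv; it requires a Windows
# machine with Excel installed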
vbscript="""if WScript.Arguments.Count < 3 Then
WScript.Echo "Please specify the source and the destination files. Usage: ExcelToCsv <xls/xlsx source file> <csv destination file> <worksheet number (starts at 1)>"
Wscript.Quit
End If
csv_format = 6
Set objFSO = CreateObject("Scripting.FileSystemObject")
src_file = objFSO.GetAbsolutePathName(Wscript.Arguments.Item(0))
dest_file = objFSO.GetAbsolutePathName(WScript.Arguments.Item(1))
worksheet_number = CInt(WScript.Arguments.Item(2))
Dim oExcel
Set oExcel = CreateObject("Excel.Application")
Dim oBook
Set oBook = oExcel.Workbooks.Open(src_file)
oBook.Worksheets(worksheet_number).Activate
oBook.SaveAs dest_file, csv_format
oBook.Close False
oExcel.Quit
"""
def correct_time(df, col_name):
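# values containing '1899' are time-only cells that were parsed with Excel's
# dummy 1899 base date; keep only the HH:MM:SS part of such values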
time = df[col_name].astype(str)
ctime = []
for i in time:
if '1899' in i:
ctime.append(i[11:])
else:
ctime.append(i)
df[col_name] = ctime
return df | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/mtropics.py | mtropics.py |
__all__ = ["GRQA"]
from typing import Union, List
from ai4water.backend import pd, os
from ._datasets import Datasets
from .utils import check_st_en
class GRQA(Datasets):
"""
Global River Water Quality Archive following the work of Virro et al., 2021 [21]_.
.. [21] https://essd.copernicus.org/articles/13/5483/2021/
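
Examples
--------
A minimal usage sketch; instantiating the class downloads the data if it
is not already present locally.
>>> from ai4water.datasets import GRQA
>>> dataset = GRQA()
... # names of available water quality parameters
>>> dataset.parameters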
"""
url = 'https://zenodo.org/record/7056647#.YzBzDHZByUk'
def __init__(
self,
download_source:bool = False,
path = None,
**kwargs):
"""
Parameters
----------
download_source : bool
whether to download the source data files or not
path : str, optional
directory where the data is (or will be) stored
"""
super().__init__(path=path, **kwargs)
self.ds_dir = path
files = ['GRQA_data_v1.3.zip', 'GRQA_meta.zip']
if download_source:
files += ['GRQA_source_data.zip']
self._download(include=files)
@property
def files(self):
return os.listdir(os.path.join(self.ds_dir, "GRQA_data_v1.3", "GRQA_data_v1.3"))
@property
def parameters(self):
return [f.split('_')[0] for f in self.files]
def fetch_parameter(
self,
parameter: str = "COD",
site_name: Union[List[str], str] = None,
country: Union[List[str], str] = None,
st:Union[int, str, pd.DatetimeIndex] = None,
en:Union[int, str, pd.DatetimeIndex] = None,
)->pd.DataFrame:
"""
Parameters
----------
parameter : str, optional
name of parameter
site_name : str/list, optional
location for which data is to be fetched.
country : str/list, optional (default=None)
name of country (or countries) whose data is to be fetched
st : str
starting date or index
en : str
end date or index
Returns
-------
pd.DataFrame
a pandas dataframe
Example
--------
>>> from ai4water.datasets import GRQA
>>> dataset = GRQA()
>>> df = dataset.fetch_parameter()
fetch data for only one country
>>> cod_pak = dataset.fetch_parameter("COD", country="Pakistan")
fetch data for only one site
>>> cod_kotri = dataset.fetch_parameter("COD", site_name="Indus River - at Kotri")
we can find out the number of data points and sites available for a specific country as below
>>> for para in dataset.parameters:
>>> data = dataset.fetch_parameter(para, country="Germany")
>>> if len(data)>0:
>>> print(f"{para}, {df.shape}, {len(df['site_name'].unique())}")
"""
assert isinstance(parameter, str)
assert parameter in self.parameters
if isinstance(site_name, str):
site_name = [site_name]
if isinstance(country, str):
country = [country]
df = self._load_df(parameter)
if site_name is not None:
assert isinstance(site_name, list)
df = df[df['site_name'].isin(site_name)]
if country is not None:
assert isinstance(country, list)
df = df[df['site_country'].isin(country)]
df.index = pd.to_datetime(df.pop("obs_date") + " " + df.pop("obs_time"))
return check_st_en(df, st, en)
def _load_df(self, parameter):
if hasattr(self, f"_load_{parameter}"):
return getattr(self, f"_load_{parameter}")()
fname = os.path.join(self.ds_dir, "GRQA_data_v1.3", "GRQA_data_v1.3", f"{parameter}_GRQA.csv")
return pd.read_csv(fname, sep=";")
def _load_DO(self):
# read_csv is causing mysterious errors
f = os.path.join(self.ds_dir, "GRQA_data_v1.3",
"GRQA_data_v1.3", f"DO_GRQA.csv")
lines = []
with open(f, 'r', encoding='utf-8') as fp:
for idx, line in enumerate(fp):
lines.append(line.split(';'))
return pd.DataFrame(lines[1:], columns=lines[0]) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/_grqa.py | _grqa.py |
import json
import glob
from typing import Union, List
from ._datasets import Datasets
from .utils import check_attributes, download, sanity_check, _unzip
from ai4water.utils.utils import dateandtime_now
from ai4water.backend import os, random, np, pd, xr
try: # shapely may not be installed, as it may be difficult to isntall and is only needed for plotting data.
from ai4water.preprocessing.spatial_utils import plot_shapefile
except ModuleNotFoundError:
plot_shapefile = None
# directory separator
SEP = os.sep
def gb_message():
link = "https://doi.org/10.5285/8344e4f3-d2ea-44f5-8afa-86d2987543a9"
raise ValueError(f"Dwonlaoad the data from {link} and provide the directory "
f"path as dataset=Camels(data=data)")
class Camels(Datasets):
"""
Get CAMELS dataset.
This class first downloads the CAMELS dataset if it is not already downloaded.
Then the selected attribute for a selected id are fetched and provided to the
user using the method `fetch`.
Attributes
-----------
- ds_dir str/path: directory of the dataset
- dynamic_features list: tells which dynamic attributes are available in
this dataset
- static_features list: a list of static attributes.
- static_attribute_categories list: tells which categories of static
attributes are present in this dataset.
Methods
---------
- stations : returns name/id of stations for which the data (dynamic attributes)
exists as list of strings.
- fetch : fetches all attributes (both static and dynamic type) of all
station/gauge_ids or a specified station. It can also be used to
fetch all attributes of a number of station ids either by providing
their gauge_id or by just saying that we need data of 20 stations
which will then be chosen randomly.
- fetch_dynamic_features :
fetches specified dynamic attributes of one specified station. If the
dynamic attribute is not specified, all dynamic attributes will be
fetched for the specified station. If station is not specified, the
specified dynamic attributes will be fetched for all stations.
- fetch_static_features :
works same as `fetch_dynamic_features` but for `static` attributes.
Here if the `category` is not specified then static attributes of
the specified station for all categories are returned.
stations : returns list of stations
"""
DATASETS = {
'CAMELS-BR': {'url': "https://zenodo.org/record/3964745#.YA6rUxZS-Uk",
},
'CAMELS-GB': {'url': gb_message},
}
def __init__(self, path=None, **kwargs):
super(Camels, self).__init__(path=path, **kwargs)
self.ds_dir = path
def stations(self):
raise NotImplementedError
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None,
en=None)->dict:
raise NotImplementedError
def fetch_static_features(
self,
stn_id: Union[str, list],
features: Union[str, list] = None
):
"""Fetches all or selected static attributes of one or more stations.
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
static features are returned.
Examples
--------
>>> from ai4water.datasets import CAMELS_AUS
>>> camels = CAMELS_AUS()
>>> camels.fetch_static_features('224214A')
>>> camels.static_features
>>> camels.fetch_static_features('224214A',
... features=['elev_mean', 'relief', 'ksat', 'pop_mean'])
"""
raise NotImplementedError
@property
def start(self): # start of data
raise NotImplementedError
@property
def end(self): # end of data
raise NotImplementedError
@property
def dynamic_features(self) -> list:
raise NotImplementedError
def _check_length(self, st, en):
if st is None:
st = self.start
if en is None:
en = self.end
return st, en
def to_ts(self, static, st, en, as_ts=False, freq='D'):
st, en = self._check_length(st, en)
if as_ts:
idx = pd.date_range(st, en, freq=freq)
static = pd.DataFrame(np.repeat(static.values, len(idx), axis=0), index=idx,
columns=static.columns)
return static
else:
return static
@property
def camels_dir(self):
"""Directory where all camels datasets will be saved. This will under
datasets directory"""
return os.path.join(self.base_ds_dir, "CAMELS")
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
if x is None:
x = os.path.join(self.camels_dir, self.__class__.__name__)
if not os.path.exists(x):
os.makedirs(x)
else:
assert os.path.exists(x), f"No data exist at {x}"
# sanity_check(self.name, x)
self._ds_dir = x
def fetch(self,
stations: Union[str, list, int, float, None] = None,
dynamic_features: Union[list, str, None] = 'all',
static_features: Union[str, list, None] = None,
st: Union[None, str] = None,
en: Union[None, str] = None,
as_dataframe: bool = False,
**kwargs
) -> Union[dict, pd.DataFrame]:
"""
Fetches the attributes of one or more stations.
Arguments:
stations : if string, it is supposed to be a station name/gauge_id.
If list, it will be a list of station/gauge_ids. If int, it will
be supposed that the user wants data for this number of
stations/gauge_ids. If None (default), then attributes of all
available stations are fetched. If float, it will be supposed that the user
wants data of this fraction of stations.
dynamic_features : If not None, then it is the attributes to be
fetched. If None, then all available attributes are fetched
static_features : list of static attributes to be fetches. None
means no static attribute will be fetched.
st : starting date of data to be returned. If None, the data will be
returned from where it is available.
en : end date of data to be returned. If None, then the data will be
returned till the date data is available.
as_dataframe : whether to return dynamic attributes as pandas
dataframe or as xarray dataset.
kwargs : keyword arguments to read the files
returns:
If both static and dynamic features are obtained then it returns a
dictionary whose keys are station/gauge_ids and values are the
attributes and dataframes.
Otherwise either dynamic or static features are returned.
Examples
--------
>>> dataset = CAMELS_AUS()
>>> # get data of 10% of stations
>>> df = dataset.fetch(stations=0.1, as_dataframe=True) # returns a multiindex dataframe
... # fetch data of 5 (randomly selected) stations
>>> df = dataset.fetch(stations=5, as_dataframe=True)
... # fetch data of 3 selected stations
>>> df = dataset.fetch(stations=['912101A','912105A','915011A'], as_dataframe=True)
... # fetch data of a single stations
>>> df = dataset.fetch(stations='318076', as_dataframe=True)
... # get both static and dynamic features as dictionary
>>> data = dataset.fetch(1, static_features="all", as_dataframe=True) # -> dict
>>> data['dynamic']
... # get only selected dynamic features
>>> df = dataset.fetch(stations='318076',
... dynamic_features=['streamflow_MLd', 'solarrad_AWAP'], as_dataframe=True)
... # fetch data between selected periods
>>> df = dataset.fetch(stations='318076', st="20010101", en="20101231", as_dataframe=True)
"""
if isinstance(stations, int):
# the user has asked to randomly provide data for some specified number of stations
stations = random.sample(self.stations(), stations)
elif isinstance(stations, list):
pass
elif isinstance(stations, str):
stations = [stations]
elif isinstance(stations, float):
num_stations = int(len(self.stations()) * stations)
stations = random.sample(self.stations(), num_stations)
elif stations is None:
# fetch for all stations
stations = self.stations()
else:
raise TypeError(f"Unknown value provided for stations {stations}")
if xr is None:
raise ModuleNotFoundError("modeule xarray must be installed to use `datasets` module")
return self.fetch_stations_attributes(
stations,
dynamic_features,
static_features,
st=st,
en=en,
as_dataframe=as_dataframe,
**kwargs
)
def _maybe_to_netcdf(self, fname: str):
self.dyn_fname = os.path.join(self.ds_dir, f'{fname}.nc')
if not os.path.exists(self.dyn_fname):
# saving all the data in netCDF file using xarray
print(f'converting data to netcdf format for faster io operations')
data = self.fetch(static_features=None)
data_vars = {}
coords = {}
for k, v in data.items():
data_vars[k] = (['time', 'dynamic_features'], v)
index = v.index
index.name = 'time'
coords = {
'dynamic_features': list(v.columns),
'time': index
}
xds = xr.Dataset(
data_vars=data_vars,
coords=coords,
attrs={'date': f"create on {dateandtime_now()}"}
)
xds.to_netcdf(self.dyn_fname)
def fetch_stations_attributes(
self,
stations: list,
dynamic_features='all',
static_features=None,
st=None,
en=None,
as_dataframe: bool = False,
**kwargs
):
"""Reads attributes of more than one stations.
Arguments:
stations : list of stations for which data is to be fetched.
dynamic_features : list of dynamic attributes to be fetched.
if 'all', then all dynamic attributes will be fetched.
static_features : list of static attributes to be fetched.
If `all`, then all static attributes will be fetched. If None,
then no static attribute will be fetched.
st : start of data to be fetched.
en : end of data to be fetched.
as_dataframe : whether to return the data as pandas dataframe. default
is xr.dataset object
kwargs dict: additional keyword arguments
Returns:
Dynamic and static features of multiple stations. Dynamic features
are by default returned as xr.Dataset unless `as_dataframe` is True, in
such a case, it is a pandas dataframe with multiindex. If xr.Dataset,
it consists of `data_vars` equal to number of stations and for each
station, the `DataArray` is of dimensions (time, dynamic_features).
where `time` is defined by `st` and `en` i.e. the length of `DataArray`.
In case the returned object is a pandas DataFrame, the first index
is `time` and second index is `dynamic_features`. Static attributes
are always returned as pandas DataFrame and have the following shape
`(stations, static_features)`. If `dynamic_features` is None,
then they are not returned and the returned value only consists of
static features. Same holds true for `static_features`.
If both are not None, then the returned type is a dictionary with
`static` and `dynamic` keys.
Raises:
ValueError, if both dynamic_features and static_features are None
Examples
--------
>>> from ai4water.datasets import CAMELS_AUS
>>> dataset = CAMELS_AUS()
... # find out station ids
>>> dataset.stations()
... # get data of selected stations
>>> dataset.fetch_stations_attributes(['912101A', '912105A', '915011A'],
... as_dataframe=True)
"""
st, en = self._check_length(st, en)
if dynamic_features is not None:
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
if not os.path.exists(self.dyn_fname):
# read from csv files
# following code will run only once when fetch is called inside init method
dyn = self._read_dynamic_from_csv(stations, dynamic_features, st=st, en=en)
else:
dyn = xr.load_dataset(self.dyn_fname) # dataset
dyn = dyn[stations].sel(dynamic_features=dynamic_features, time=slice(st, en))
if as_dataframe:
dyn = dyn.to_dataframe(['time', 'dynamic_features'])
if static_features is not None:
static = self.fetch_static_features(stations, static_features)
stns = {'dynamic': dyn, 'static': static}
else:
stns = dyn
elif static_features is not None:
return self.fetch_static_features(stations, static_features)
else:
raise ValueError
return stns
def fetch_dynamic_features(
self,
stn_id: str,
features='all',
st=None,
en=None,
as_dataframe=False
):
"""Fetches all or selected dynamic attributes of one station.
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
dynamic features are returned.
st : Optional (default=None)
start time from where to fetch the data.
en : Optional (default=None)
end time untill where to fetch the data
as_dataframe : bool, optional (default=False)
if true, the returned data is pandas DataFrame otherwise it
is xarray dataset
Examples
--------
>>> from ai4water.datasets import CAMELS_AUS
>>> camels = CAMELS_AUS()
>>> camels.fetch_dynamic_features('224214A', as_dataframe=True).unstack()
>>> camels.dynamic_features
>>> camels.fetch_dynamic_features('224214A',
... features=['tmax_AWAP', 'vprp_AWAP', 'streamflow_mmd'],
... as_dataframe=True).unstack()
"""
assert isinstance(stn_id, str), f"station id must be string but is of type {type(stn_id)}"
station = [stn_id]
return self.fetch_stations_attributes(
station,
features,
None,
st=st,
en=en,
as_dataframe=as_dataframe
)
def fetch_station_attributes(
self,
station: str,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
as_ts: bool = False,
st: Union[str, None] = None,
en: Union[str, None] = None,
**kwargs
) -> pd.DataFrame:
"""
Fetches attributes for one station.
Parameters
-----------
station :
station id/gauge id for which the data is to be fetched.
dynamic_features : str/list, optional
names of dynamic features/attributes to fetch
static_features :
names of static features/attributes to be fetches
as_ts : bool
whether static attributes are to be converted into a time
series or not. If yes, then the returned time series will be of
the same length as that of the dynamic attributes.
st : str, optional
starting point from which the data is to be fetched. By default
the data will be fetched from where it is available.
en : str, optional
end point of the data to be fetched. By default the data will be
fetched till the end of the available record.
Returns
-------
pd.DataFrame
dataframe if as_ts is True else it returns a dictionary of static and
dynamic attributes for a station/gauge_id
Examples
--------
>>> from ai4water.datasets import CAMELS_AUS
>>> dataset = CAMELS_AUS()
>>> dataset.fetch_station_attributes('912101A')
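... # fetch only static features of the station (assuming the dataset
... # provides static features)
>>> dataset.fetch_station_attributes('912101A', dynamic_features=None,
...     static_features='all')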
"""
st, en = self._check_length(st, en)
station_df = pd.DataFrame()
if dynamic_features:
dynamic = self.fetch_dynamic_features(station, dynamic_features, st=st,
en=en, **kwargs)
station_df = pd.concat([station_df, dynamic])
if static_features is not None:
static = self.fetch_static_features(station, static_features)
if as_ts:
station_df = pd.concat([station_df, static], axis=1)
else:
station_df = {'dynamic': station_df, 'static': static}
elif static_features is not None:
station_df = self.fetch_static_features(station, static_features)
return station_df
class LamaH(Camels):
"""
Large-Sample Data for Hydrology and Environmental Sciences for Central Europe
from Zenodo_ following the work of Klingler_ et al., 2021.
.. _Zenodo:
https://zenodo.org/record/4609826#.YFNp59zt02w
.. _Klingler:
https://essd.copernicus.org/preprints/essd-2021-72/
"""
url = "https://zenodo.org/record/4609826#.YFNp59zt02w"
_data_types = ['total_upstrm', 'diff_upstrm_all', 'diff_upstrm_lowimp']
time_steps = ['daily', 'hourly']
static_attribute_categories = ['']
def __init__(self, *,
time_step: str,
data_type: str,
**kwargs
):
"""
Parameters
----------
time_step :
possible values are ``daily`` or ``hourly``
data_type :
possible values are ``total_upstrm``, ``diff_upstrm_all``
or ``diff_upstrm_lowimp``
Examples
--------
>>> from ai4water.datasets import LamaH
>>> dataset = LamaH(time_step='daily', data_type='total_upstrm')
>>> df = dataset.fetch(3, as_dataframe=True)
"""
assert time_step in self.time_steps, f"invalid time_step {time_step} given"
assert data_type in self._data_types, f"invalid data_type {data_type} given."
self.time_step = time_step
self.data_type = data_type
super().__init__(**kwargs)
fpath = os.path.join(self.ds_dir, 'lamah_diff_upstrm_lowimp_hourly_dyn.nc')
_data_types = self._data_types if self.time_step == 'daily' else ['total_upstrm']
if not os.path.exists(fpath):
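# convert the time series of every available (data_type, time_step)
# combination to netCDF once; time_step and data_type are restored
# to the user-provided values afterwards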
for dt in _data_types:
for ts in self.time_steps:
self.time_step = ts
self.data_type = dt
fname = f"lamah_{dt}_{ts}_dyn"
self._maybe_to_netcdf(fname)
self.time_step = time_step
self.data_type = data_type
self.dyn_fname = os.path.join(self.ds_dir,
f'lamah_{data_type}_{time_step}_dyn.nc')
@property
def dynamic_features(self):
station = self.stations()[0]
df = self.read_ts_of_station(station)
return df.columns.to_list()
@property
def static_features(self) -> list:
fname = os.path.join(self.data_type_dir,
f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
return df.columns.to_list()
@property
def data_type_dir(self):
directory = 'CAMELS_AT'
if self.time_step == 'hourly':
directory = 'CAMELS_AT1' # todo, use it only for hourly, daily is causing errors
# self.ds_dir/CAMELS_AT/data_type_dir
f = [f for f in os.listdir(os.path.join(self.ds_dir, directory)) if self.data_type in f][0]
return os.path.join(self.ds_dir, f'{directory}{SEP}{f}')
def stations(self) -> list:
# assuming file_names of the format ID_{stn_id}.csv
_dirs = os.listdir(os.path.join(self.data_type_dir,
f'2_timeseries{SEP}{self.time_step}'))
s = [f.split('_')[1].split('.csv')[0] for f in _dirs]
return s
def _read_dynamic_from_csv(self,
stations,
dynamic_features: Union[str, list] = 'all',
st=None,
en=None,
):
"""Reads attributes of one station"""
stations_attributes = {}
for station in stations:
station_df = pd.DataFrame()
if dynamic_features is not None:
dynamic_df = self.read_ts_of_station(station)
station_df = pd.concat([station_df, dynamic_df])
stations_attributes[station] = station_df
return stations_attributes
def fetch_static_features(
self,
stn_id: Union[str, List[str]],
features:Union[str, List[str]]=None
) -> pd.DataFrame:
"""
static features of LamaH
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
static features are returned.
Examples
--------
>>> from ai4water.datasets import LamaH
>>> dataset = LamaH(time_step='daily', data_type='total_upstrm')
>>> df = dataset.fetch_static_features('99') # (1, 61)
... # get list of all static features
>>> dataset.static_features
>>> dataset.fetch_static_features('99',
...     features=['area_calc', 'elev_mean', 'agr_fra', 'sand_fra'])  # (1, 4)
"""
fname = os.path.join(self.data_type_dir,
f'1_attributes{SEP}Catchment_attributes.csv')
df = pd.read_csv(fname, sep=';', index_col='ID')
# if features is not None:
static_features = check_attributes(features, self.static_features)
df = df[static_features]
if isinstance(stn_id, list):
stations = [str(i) for i in stn_id]
elif isinstance(stn_id, int):
stations = str(stn_id)
else:
stations = stn_id
df.index = df.index.astype(str)
df = df.loc[stations]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
def read_ts_of_station(self, station) -> pd.DataFrame:
# read a file containing timeseries data for one station
fname = os.path.join(self.data_type_dir,
f'2_timeseries{SEP}{self.time_step}{SEP}ID_{station}.csv')
df = pd.read_csv(fname, sep=';')
if self.time_step == 'daily':
periods = pd.PeriodIndex(year=df["YYYY"], month=df["MM"], day=df["DD"],
freq="D")
df.index = periods.to_timestamp()
else:
periods = pd.PeriodIndex(year=df["YYYY"],
month=df["MM"], day=df["DD"], hour=df["hh"],
minute=df["mm"], freq="H")
df.index = periods.to_timestamp()
# remove the cols specifying index
[df.pop(item) for item in ['YYYY', 'MM', 'DD', 'hh', 'mm'] if item in df]
return df
@property
def start(self):
return "19810101"
@property
def end(self):
return "20191231"
class HYSETS(Camels):
"""
database for hydrometeorological modeling of 14,425 North American watersheds
from 1950-2018 following the work of `Arsenault et al., 2020 <https://doi.org/10.1038/s41597-020-00583-2>`_
The user must manually download the files, unpack them and provide
the `path` where these files are saved.
This data comes with multiple sources. Each source having one or more dynamic_features
Following data_source are available.
+---------------+------------------------------+
|sources        | dynamic_features             |
+===============+==============================+
|SNODAS_SWE     | discharge, swe               |
+---------------+------------------------------+
|SCDNA          | discharge, pr, tasmin, tasmax|
+---------------+------------------------------+
|nonQC_stations | discharge, pr, tasmin, tasmax|
+---------------+------------------------------+
|Livneh         | discharge, pr, tasmin, tasmax|
+---------------+------------------------------+
|ERA5           | discharge, pr, tasmax, tasmin|
+---------------+------------------------------+
|ERA5Land_SWE   | discharge, swe               |
+---------------+------------------------------+
|ERA5Land       | discharge, pr, tasmax, tasmin|
+---------------+------------------------------+
All sources contain one or more of the following dynamic_features
with the following shapes:
+----------------------------+------------------+
|dynamic_features            | shape            |
+============================+==================+
|time | (25202,) |
+----------------------------+------------------+
|watershedID | (14425,) |
+----------------------------+------------------+
|drainage_area | (14425,) |
+----------------------------+------------------+
|drainage_area_GSIM | (14425,) |
+----------------------------+------------------+
|flag_GSIM_boundaries | (14425,) |
+----------------------------+------------------+
|flag_artificial_boundaries | (14425,) |
+----------------------------+------------------+
|centroid_lat | (14425,) |
+----------------------------+------------------+
|centroid_lon | (14425,) |
+----------------------------+------------------+
|elevation | (14425,) |
+----------------------------+------------------+
|slope | (14425,) |
+----------------------------+------------------+
|discharge | (14425, 25202) |
+----------------------------+------------------+
|pr | (14425, 25202) |
+----------------------------+------------------+
|tasmax | (14425, 25202) |
+----------------------------+------------------+
|tasmin | (14425, 25202) |
+----------------------------+------------------+
Examples
--------
>>> from ai4water.datasets import HYSETS
>>> dataset = HYSETS(path="path/to/HYSETS")
... # fetch data of a random station
>>> df = dataset.fetch(1, as_dataframe=True)
>>> df.shape
(25202, 5)
>>> stations = dataset.stations()
>>> len(stations)
14425
>>> df = dataset.fetch('999', as_dataframe=True)
>>> df.unstack().shape
(25202, 5)
"""
doi = "https://doi.org/10.1038/s41597-020-00583-2"
url = "https://osf.io/rpc3w/"
Q_SRC = ['ERA5', 'ERA5Land', 'ERA5Land_SWE', 'Livneh', 'nonQC_stations', 'SCDNA', 'SNODAS_SWE']
SWE_SRC = ['ERA5Land_SWE', 'SNODAS_SWE']
OTHER_SRC = [src for src in Q_SRC if src not in ['ERA5Land_SWE', 'SNODAS_SWE']]
dynamic_features = ['discharge', 'swe', 'tasmin', 'tasmax', 'pr']
def __init__(self,
path: str,
swe_source: str = "SNODAS_SWE",
discharge_source: str = "ERA5",
tasmin_source: str = "ERA5",
tasmax_source: str = "ERA5",
pr_source: str = "ERA5",
**kwargs
):
"""
Arguments:
path : path where all the data files are saved.
swe_source : source of swe data.
discharge_source : source of discharge data
tasmin_source : source of tasmin data
tasmax_source : source of tasmax data
pr_source : source of pr data
kwargs : arguments for `Camels` base class
"""
assert swe_source in self.SWE_SRC, f'source must be one of {self.SWE_SRC}'
assert discharge_source in self.Q_SRC, f'source must be one of {self.Q_SRC}'
assert tasmin_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert tasmax_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
assert pr_source in self.OTHER_SRC, f'source must be one of {self.OTHER_SRC}'
self.sources = {
'swe': swe_source,
'discharge': discharge_source,
'tasmin': tasmin_source,
'tasmax': tasmax_source,
'pr': pr_source
}
super().__init__(**kwargs)
self.ds_dir = path
fpath = os.path.join(self.ds_dir, 'hysets_dyn.nc')
if not os.path.exists(fpath):
self._maybe_to_netcdf('hysets_dyn')
def _maybe_to_netcdf(self, fname: str):
# todo saving as one file takes very long time
oneD_vars = []
twoD_vars = []
for src in self.Q_SRC:
xds = xr.open_dataset(os.path.join(self.ds_dir, f'HYSETS_2020_{src}.nc'))
for var in xds.variables:
print(f'getting {var} from source {src} ')
if len(xds[var].data.shape) > 1:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
twoD_vars.append(xar)
else:
xar = xds[var]
xar.name = f"{xar.name}_{src}"
oneD_vars.append(xar)
oneD_xds = xr.merge(oneD_vars)
twoD_xds = xr.merge(twoD_vars)
oneD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_static.nc"))
twoD_xds.to_netcdf(os.path.join(self.ds_dir, "hysets_dyn.nc"))
return
@property
def ds_dir(self):
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('HYSETS', x)
self._ds_dir = x
@property
def static_features(self)->list:
df = self.read_static_data()
return df.columns.to_list()
def stations(self) -> List[str]:
"""
Returns
-------
list
a list of ids of stations
Examples
--------
>>> dataset = HYSETS(path="path/to/HYSETS")
... # get name of all stations as list
>>> dataset.stations()
"""
return self.read_static_data().index.to_list()
@property
def start(self)->str:
return "19500101"
@property
def end(self)->str:
return "20181231"
def fetch_stations_attributes(
self,
stations: list,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
st=None,
en=None,
as_dataframe: bool = False,
**kwargs
):
"""returns attributes of multiple stations
Examples
--------
>>> from ai4water.datasets import HYSETS
>>> dataset = HYSETS(path="path/to/HYSETS")
>>> stations = dataset.stations()[0:3]
>>> attributes = dataset.fetch_stations_attributes(stations)
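... # the same data can also be returned as a multi-indexed dataframe
>>> attributes = dataset.fetch_stations_attributes(stations, as_dataframe=True)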
"""
stations = check_attributes(stations, self.stations())
stations = [int(stn) for stn in stations]
if dynamic_features is not None:
dyn = self._fetch_dynamic_features(stations=stations,
dynamic_features=dynamic_features,
as_dataframe=as_dataframe,
st=st,
en=en,
**kwargs
)
if static_features is not None: # we want both static and dynamic
to_return = {}
static = self._fetch_static_features(station=stations,
static_features=static_features,
st=st,
en=en,
**kwargs
)
to_return['static'] = static
to_return['dynamic'] = dyn
else:
to_return = dyn
elif static_features is not None:
# we want only static
to_return = self._fetch_static_features(
station=stations,
static_features=static_features,
**kwargs
)
else:
raise ValueError
return to_return
def fetch_dynamic_features(
self,
stn_id,
features='all',
st=None,
en=None,
as_dataframe=False
):
"""Fetches dynamic attributes of one station.
Examples
--------
>>> from ai4water.datasets import HYSETS
>>> dataset = HYSETS(path="path/to/HYSETS")
>>> dyn_features = dataset.fetch_dynamic_features('991')
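... # fetch the same data as a multi-indexed dataframe
>>> dyn_features = dataset.fetch_dynamic_features('991', as_dataframe=True)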
"""
station = [int(stn_id)]
return self._fetch_dynamic_features(
stations=station,
dynamic_features=features,
st=st,
en=en,
as_dataframe=as_dataframe
)
def _fetch_dynamic_features(
self,
stations: list,
dynamic_features='all',
st=None,
en=None,
as_dataframe=False,
as_ts=False
):
"""Fetches dynamic attributes of station."""
st, en = self._check_length(st, en)
attrs = check_attributes(dynamic_features, self.dynamic_features)
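# station/watershed ids are 1-based, whereas the values of the `watershed`
# coordinate in the netCDF file start at 0, hence subtract 1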
stations = np.subtract(stations, 1).tolist()
# maybe we don't need to read all variables
sources = {k: v for k, v in self.sources.items() if k in attrs}
# The original .nc file contains dynamic and static features as data_vars.
# However, for uniformity of this API and ease of use, we want a Dataset
# whose data_vars are station names/gauge_ids, each with
# dimensions (time, dynamic_features).
# Therefore, first read all data for each station from the .nc file
# and then rearrange it.
# todo, this operation is slower because of `to_dataframe`
# also doing this removes all the metadata
x = {}
f = os.path.join(self.ds_dir, "hysets_dyn.nc")
xds = xr.open_dataset(f)
for stn in stations:
xds1 = xds[[f'{k}_{v}' for k, v in sources.items()]].sel(watershed=stn, time=slice(st, en))
xds1 = xds1.rename_vars({f'{k}_{v}': k for k, v in sources.items()})
x[stn] = xds1.to_dataframe(['time'])
xds = xr.Dataset(x)
xds = xds.rename_dims({'dim_1': 'dynamic_features'})
xds = xds.rename_vars({'dim_1': 'dynamic_features'})
if as_dataframe:
return xds.to_dataframe(['time', 'dynamic_features'])
return xds
def _fetch_static_features(
self,
station,
static_features: Union[str, list] = 'all',
st=None,
en=None,
as_ts=False
):
df = self.read_static_data()
static_features = check_attributes(static_features, self.static_features)
if isinstance(station, str):
station = [station]
elif isinstance(station, int):
station = [str(station)]
elif isinstance(station, list):
station = [str(stn) for stn in station]
else:
raise ValueError
return self.to_ts(df.loc[station][static_features], st=st, en=en, as_ts=as_ts)
def fetch_static_features(
self,
stn_id: Union[str, List[str]],
features:Union[str, List[str]]="all",
st=None,
en=None,
as_ts=False
) -> pd.DataFrame:
"""returns static atttributes of one or multiple stations
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
static features are returned.
Examples
---------
>>> from ai4water.datasets import HYSETS
>>> dataset = HYSETS(path="path/to/HYSETS")
get the names of stations
>>> stns = dataset.stations()
>>> len(stns)
14425
get all static data of all stations
>>> static_data = dataset.fetch_static_features(stns)
>>> static_data.shape
(14425, 28)
get static data of one station only
>>> static_data = dataset.fetch_static_features('991')
>>> static_data.shape
(1, 28)
get the names of static features
>>> dataset.static_features
get only selected features of all stations
>>> static_data = dataset.fetch_static_features(stns, ['Drainage_Area_km2', 'Elevation_m'])
>>> static_data.shape
(14425, 2)
"""
return self._fetch_static_features(stn_id, features, st, en, as_ts)
def read_static_data(self):
fname = os.path.join(self.ds_dir, 'HYSETS_watershed_properties.txt')
static_df = pd.read_csv(fname, index_col='Watershed_ID', sep=';')
static_df.index = static_df.index.astype(str)
return static_df
class CAMELS_US(Camels):
"""
Downloads and processes the CAMELS dataset of 671 catchments of the USA
from https://ral.ucar.edu/solutions/products/camels following Newman et al., 2015 [1]_
Examples
--------
>>> from ai4water.datasets import CAMELS_US
>>> dataset = CAMELS_US(path=r'F:\data\CAMELS\CAMELS_US')
>>> df = dataset.fetch(stations=1, as_dataframe=True)
>>> df = df.unstack() # the returned dataframe is a multi-indexed dataframe so we have to unstack it
>>> df.shape
(12784, 8)
# get name of all stations as list
>>> stns = dataset.stations()
>>> len(stns)
671
# get data by station id
>>> df = dataset.fetch(stations='11478500', as_dataframe=True).unstack()
>>> df.shape
(12784, 8)
# get names of available dynamic features
>>> dataset.dynamic_features
# get only selected dynamic features
>>> df = dataset.fetch(1, as_dataframe=True,
... dynamic_features=['prcp(mm/day)', 'srad(W/m2)', 'tmax(C)', 'tmin(C)', 'Flow']).unstack()
>>> df.shape
(12784, 5)
# get names of available static features
>>> dataset.static_features
# get data of 10 random stations
>>> df = dataset.fetch(10, as_dataframe=True)
>>> df.shape
(102272, 10) # remember this is multi-indexed DataFrame
.. [1] https://doi.org/10.5194/hess-19-209-2015
"""
DATASETS = ['CAMELS_US']
url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/basin_timeseries_v1p2_metForcing_obsFlow.zip"
catchment_attr_url = "https://ral.ucar.edu/sites/default/files/public/product-tool/camels-catchment-attributes-and-meteorology-for-large-sample-studies-dataset-downloads/camels_attributes_v2.0.zip"
folders = {'basin_mean_daymet': f'basin_mean_forcing{SEP}daymet',
'basin_mean_maurer': f'basin_mean_forcing{SEP}maurer',
'basin_mean_nldas': f'basin_mean_forcing{SEP}nldas',
'basin_mean_v1p15_daymet': f'basin_mean_forcing{SEP}v1p15{SEP}daymet',
'basin_mean_v1p15_nldas': f'basin_mean_forcing{SEP}v1p15{SEP}nldas',
'elev_bands': f'elev{SEP}daymet',
'hru': f'hru_forcing{SEP}daymet'}
dynamic_features = ['dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)', 'Flow']
def __init__(self, data_source='basin_mean_daymet', path=None):
assert data_source in self.folders, f'allowed data sources are {self.folders.keys()}'
self.data_source = data_source
super().__init__(path=path, name="CAMELS_US")
self.ds_dir = path
if os.path.exists(self.ds_dir):
print(f"dataset is already downloaded at {self.ds_dir}")
else:
download(self.url, os.path.join(self.camels_dir, f'CAMELS_US{SEP}CAMELS_US.zip'))
download(self.catchment_attr_url, os.path.join(self.camels_dir, f"CAMELS_US{SEP}catchment_attrs.zip"))
_unzip(self.ds_dir)
self.attr_dir = os.path.join(self.ds_dir, f'catchment_attrs{SEP}camels_attributes_v2.0')
self.dataset_dir = os.path.join(self.ds_dir, f'CAMELS_US{SEP}basin_dataset_public_v1p2')
self._maybe_to_netcdf('camels_us_dyn')
@property
def start(self):
return "19800101"
@property
def end(self):
return "20141231"
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=';', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='gauge_id', nrows=1)
cols = list(df.columns)
return cols
def stations(self) -> list:
stns = []
for _dir in os.listdir(os.path.join(self.dataset_dir, 'usgs_streamflow')):
cat = os.path.join(self.dataset_dir, f'usgs_streamflow{SEP}{_dir}')
stns += [fname.split('_')[0] for fname in os.listdir(cat)]
# remove stations for which static values are not available
for stn in ['06775500', '06846500', '09535100']:
stns.remove(stn)
return stns
def _read_dynamic_from_csv(self,
stations,
dynamic_features: Union[str, list] = 'all',
st=None,
en=None,
):
dyn = {}
for station in stations:
# attributes = check_attributes(dynamic_features, self.dynamic_features)
assert isinstance(station, str)
df = None
df1 = None
dir_name = self.folders[self.data_source]
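# forcing data and streamflow are stored in separate directories;
# first read the meteorological forcing file of this station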
for cat in os.listdir(os.path.join(self.dataset_dir, dir_name)):
cat_dirs = os.listdir(os.path.join(self.dataset_dir, f'{dir_name}{SEP}{cat}'))
stn_file = f'{station}_lump_cida_forcing_leap.txt'
if stn_file in cat_dirs:
df = pd.read_csv(os.path.join(self.dataset_dir,
f'{dir_name}{SEP}{cat}{SEP}{stn_file}'),
sep="\s+|;|:",
skiprows=4,
engine='python',
names=['Year', 'Mnth', 'Day', 'Hr', 'dayl(s)', 'prcp(mm/day)', 'srad(W/m2)',
'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)'],
)
df.index = pd.to_datetime(
df['Year'].map(str) + '-' + df['Mnth'].map(str) + '-' + df['Day'].map(str))
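# now read the observed streamflow of the same station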
flow_dir = os.path.join(self.dataset_dir, 'usgs_streamflow')
for cat in os.listdir(flow_dir):
cat_dirs = os.listdir(os.path.join(flow_dir, cat))
stn_file = f'{station}_streamflow_qc.txt'
if stn_file in cat_dirs:
fpath = os.path.join(flow_dir, f'{cat}{SEP}{stn_file}')
df1 = pd.read_csv(fpath, sep=r"\s+|;|:'",
names=['station', 'Year', 'Month', 'Day', 'Flow', 'Flag'],
engine='python')
df1.index = pd.to_datetime(
df1['Year'].map(str) + '-' + df1['Month'].map(str) + '-' + df1['Day'].map(str))
out_df = pd.concat([df[['dayl(s)',
'prcp(mm/day)', 'srad(W/m2)', 'swe(mm)', 'tmax(C)', 'tmin(C)', 'vp(Pa)']],
df1['Flow']],
axis=1)
dyn[station] = out_df
return dyn
def fetch_static_features(
self,
stn_id: Union[str, List[str]],
features:Union[str, List[str]]=None
):
"""
gets one or more static features of one or more stations
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
static features are returned.
Examples
--------
>>> from ai4water.datasets import CAMELS_US
>>> camels = CAMELS_US()
>>> st_data = camels.fetch_static_features('11532500')
>>> st_data.shape
(1, 59)
get names of available static features
>>> camels.static_features
get specific features of one station
>>> static_data = camels.fetch_static_features('11528700',
...     features=['area_gages2', 'geol_porostiy', 'soil_conductivity', 'elev_mean'])
>>> static_data.shape
(1, 4)
get names of all stations
>>> all_stns = camels.stations()
>>> len(all_stns)
671
>>> all_static_data = camels.fetch_static_features(all_stns)
>>> all_static_data.shape
(671, 59)
"""
attributes = check_attributes(features, self.static_features)
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, 'catchment_attrs', 'camels_attributes_v2.0')}/*.txt")
static_df = pd.DataFrame()
for f in files:
# index should be read as string
idx = pd.read_csv(f, sep=';', usecols=['gauge_id'], dtype=str)
_df = pd.read_csv(f, sep=';', index_col='gauge_id')
_df.index = idx['gauge_id']
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath, index_label='gauge_id')
else:  # index should be read as string because it has leading zeros
idx = pd.read_csv(static_fpath, usecols=['gauge_id'], dtype=str)
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
static_df.index = idx['gauge_id']
static_df.index = static_df.index.astype(str)
df = static_df.loc[stn_id][attributes]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return df
class CAMELS_BR(Camels):
"""
Downloads and processes CAMELS dataset of Brazil
Examples
--------
>>> from ai4water.datasets import CAMELS_BR
>>> dataset = CAMELS_BR(path=r'F:\data\CAMELS\CAMELS_BR')
>>> df = dataset.fetch(stations=1, as_dataframe=True)
>>> df = df.unstack() # the returned dataframe is a multi-indexed dataframe so we have to unstack it
>>> df.shape
(14245, 12)
# get name of all stations as list
>>> stns = dataset.stations()
>>> len(stns)
593
# get data by station id
>>> df = dataset.fetch(stations='46035000', as_dataframe=True).unstack()
>>> df.shape
(14245, 12)
# get names of available dynamic features
>>> dataset.dynamic_features
# get only selected dynamic features
>>> df = dataset.fetch(1, as_dataframe=True,
... dynamic_features=['precipitation_cpc', 'evapotransp_mgb', 'temperature_mean', 'streamflow_m3s']).unstack()
>>> df.shape
(14245, 4)
# get names of available static features
>>> dataset.static_features
# get data of 10 random stations
>>> df = dataset.fetch(10, as_dataframe=True)
>>> df.shape
(170940, 10) # remember this is multi-indexed DataFrame
"""
url = "https://zenodo.org/record/3964745#.YA6rUxZS-Uk"
folders = {'streamflow_m3s': '02_CAMELS_BR_streamflow_m3s',
'streamflow_mm': '03_CAMELS_BR_streamflow_mm_selected_catchments',
'simulated_streamflow_m3s': '04_CAMELS_BR_streamflow_simulated',
'precipitation_cpc': '07_CAMELS_BR_precipitation_cpc',
'precipitation_mswep': '06_CAMELS_BR_precipitation_mswep',
'precipitation_chirps': '05_CAMELS_BR_precipitation_chirps',
'evapotransp_gleam': '08_CAMELS_BR_evapotransp_gleam',
'evapotransp_mgb': '09_CAMELS_BR_evapotransp_mgb',
'potential_evapotransp_gleam': '10_CAMELS_BR_potential_evapotransp_gleam',
'temperature_min': '11_CAMELS_BR_temperature_min_cpc',
'temperature_mean': '12_CAMELS_BR_temperature_mean_cpc',
'temperature_max': '13_CAMELS_BR_temperature_max_cpc'
}
def __init__(self, path=None):
super().__init__(path=path, name="CAMELS-BR")
self.ds_dir = path
self._download()
self._maybe_to_netcdf('camels_dyn_br')
@property
def _all_dirs(self):
"""All the folders in the dataset_directory"""
return [f for f in os.listdir(self.ds_dir) if os.path.isdir(os.path.join(self.ds_dir, f))]
@property
def static_dir(self):
path = None
for _dir in self._all_dirs:
if "attributes" in _dir:
# supposing that 'attributes' exists in only one file/folder in self.ds_dir
path = os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}')
return path
@property
def static_files(self):
all_files = None
if self.static_dir is not None:
all_files = glob.glob(f"{self.static_dir}/*.txt")
return all_files
@property
def dynamic_features(self) -> list:
return list(CAMELS_BR.folders.keys())
@property
def static_attribute_categories(self):
static_attrs = []
for f in self.static_files:
ff = str(os.path.basename(f).split('.txt')[0])
static_attrs.append('_'.join(ff.split('_')[2:]))
return static_attrs
@property
def static_features(self):
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(
f"{os.path.join(self.ds_dir, '01_CAMELS_BR_attributes', '01_CAMELS_BR_attributes')}/*.txt")
cols = []
for f in files:
_df = pd.read_csv(f, sep=' ', index_col='gauge_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='gauge_id', nrows=1)
cols = list(df.columns)
return cols
@property
def start(self):
return "19800101"
@property
def end(self):
return "20181231"
def all_stations(self, attribute) -> list:
"""Tells all station ids for which a data of a specific attribute is available."""
all_files = []
for _attr, _dir in self.folders.items():
if attribute in _attr:
all_files = os.listdir(os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}'))
stations = []
for f in all_files:
stations.append(str(f.split('_')[0]))
return stations
def stations(self, to_exclude=None) -> list:
"""Returns a list of station ids which are common among all dynamic
attributes.
Example
-------
>>> dataset = CAMELS_BR()
>>> stations = dataset.stations()
"""
if to_exclude is not None:
if not isinstance(to_exclude, list):
assert isinstance(to_exclude, str)
to_exclude = [to_exclude]
else:
to_exclude = []
stations = {}
for dyn_attr in self.dynamic_features:
if dyn_attr not in to_exclude:
stations[dyn_attr] = self.all_stations(dyn_attr)
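# keep only those stations which are common among all dynamic attributes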
stns = list(set.intersection(*map(set, list(stations.values()))))
return stns
def _read_dynamic_from_csv(self,
stations,
attributes: Union[str, list] = 'all',
st=None,
en=None,
):
"""
returns the dynamic/time series attribute/attributes for one station id.
Example
-------
>>> dataset = CAMELS_BR()
>>> pcp = dataset.fetch_dynamic_features('10500000', 'precipitation_cpc')
... # fetch all time series data associated with a station.
>>> x = dataset.fetch_dynamic_features('51560000', dataset.dynamic_features)
"""
attributes = check_attributes(attributes, self.dynamic_features)
dyn = {}
for stn_id in stations:
# making one separate dataframe for one station
data = pd.DataFrame()
for attr, _dir in self.folders.items():
if attr in attributes:
path = os.path.join(self.ds_dir, f'{_dir}{SEP}{_dir}')
# supposing that the filename starts with stn_id and has .txt extension.
fname = [f for f in os.listdir(path) if f.startswith(str(stn_id)) and f.endswith('.txt')]
fname = fname[0]
if os.path.exists(os.path.join(path, fname)):
df = pd.read_csv(os.path.join(path, fname), sep=' ')
df.index = pd.to_datetime(df[['year', 'month', 'day']])
df.index.freq = pd.infer_freq(df.index)
df = df[st:en]
# only read one column which matches the attr
# todo, qual_flag maybe important
[df.pop(item) for item in df.columns if item != attr]
data = pd.concat([data, df], axis=1)
else:
raise FileNotFoundError(f"file {fname} not found at {path}")
dyn[stn_id] = data
return dyn
def fetch_static_features(
self,
stn_id: Union[str, List[str]],
features:Union[str, List[str]]=None
) -> pd.DataFrame:
"""
Parameters
----------
stn_id : int/list
station id whose attribute to fetch
features : str/list
name of the attribute/attributes to fetch. Default is None, which
will return all available static attributes for the given station.
Example
-------
>>> dataset = CAMELS_BR()
>>> df = dataset.fetch_static_features('11500000')
# read all static features of all stations
>>> data = dataset.fetch_static_features(dataset.stations(), dataset.static_features)
>>> data.shape
(597, 67)
"""
if isinstance(stn_id, int):
station = [str(stn_id)]
elif isinstance(stn_id, list):
station = [str(stn) for stn in stn_id]
elif isinstance(stn_id, str):
station = [stn_id]
else:
raise ValueError
attributes = check_attributes(features, self.static_features)
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(
f"{os.path.join(self.ds_dir, '01_CAMELS_BR_attributes', '01_CAMELS_BR_attributes')}/*.txt")
static_df = pd.DataFrame()
for f in files:
_df = pd.read_csv(f, sep=' ', index_col='gauge_id')
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath, index_label='gauge_id')
else:
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
static_df.index = static_df.index.astype(str)
return pd.DataFrame(static_df.loc[station][attributes])
class CAMELS_GB(Camels):
"""
This dataset must be manually downloaded by the user.
The path of the downloaded folder must be provided while initiating this class.
"""
dynamic_features = ["precipitation", "pet", "temperature", "discharge_spec",
"discharge_vol", "peti",
"humidity", "shortwave_rad", "longwave_rad", "windspeed"]
def __init__(self, path=None):
super().__init__(name="CAMELS-GB", path=path)
self._maybe_to_netcdf('camels_gb_dyn')
@property
def ds_dir(self):
"""Directory where a particular dataset will be saved. """
return self._ds_dir
@ds_dir.setter
def ds_dir(self, x):
sanity_check('CAMELS-GB', x)
self._ds_dir = x
@property
def static_attribute_categories(self) -> list:
attributes = []
path = os.path.join(self.ds_dir, 'data')
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)) and f.endswith('csv'):
attributes.append(f.split('_')[2])
return attributes
@property
def start(self):
return "19701001"
@property
def end(self):
return "20150930"
@property
def static_features(self):
files = glob.glob(f"{os.path.join(self.ds_dir, 'data')}/*.csv")
cols = []
for f in files:
if 'static_features.csv' not in f:
df = pd.read_csv(f, nrows=1, index_col='gauge_id')
cols += (list(df.columns))
return cols
def stations(self, to_exclude=None):
# CAMELS_GB_hydromet_timeseries_StationID_number
path = os.path.join(self.ds_dir, f'data{SEP}timeseries')
gauge_ids = []
for f in os.listdir(path):
gauge_ids.append(f.split('_')[4])
return gauge_ids
def _read_dynamic_from_csv(self,
stations,
attributes: Union[str, list] = 'all',
st=None,
en=None,
):
"""Fetches dynamic attribute/attributes of one station."""
dyn = {}
for stn_id in stations:
# making one separate dataframe for one station
path = os.path.join(self.ds_dir, f"data{SEP}timeseries")
fname = None
for f in os.listdir(path):
if stn_id in f:
fname = f
break
df = pd.read_csv(os.path.join(path, fname), index_col='date')
df.index = pd.to_datetime(df.index)
df.index.freq = pd.infer_freq(df.index)
dyn[stn_id] = df
return dyn
def fetch_static_features(
self,
stn_id: Union[str, List[str]],
features:Union[str, List[str]]="all"
) -> pd.DataFrame:
"""
Fetches static attributes of one or more stations for one or
more category as dataframe.
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
static features are returned.
Examples
---------
>>> from ai4water.datasets import CAMELS_GB
>>> dataset = CAMELS_GB()
get the names of stations
>>> stns = dataset.stations()
>>> len(stns)
671
get all static data of all stations
>>> static_data = dataset.fetch_static_features(stns)
>>> static_data.shape
(671, 290)
get static data of one station only
>>> static_data = dataset.fetch_static_features('85004')
>>> static_data.shape
(1, 290)
get the names of static features
>>> dataset.static_features
get only selected features of all stations
>>> static_data = dataset.fetch_static_features(stns, ['area', 'elev_mean'])
>>> static_data.shape
(671, 2)
"""
attributes = check_attributes(features, self.static_features)
static_fname = 'static_features.csv'
static_fpath = os.path.join(self.ds_dir, 'data', static_fname)
if os.path.exists(static_fpath):
static_df = pd.read_csv(static_fpath, index_col='gauge_id')
else:
files = glob.glob(f"{os.path.join(self.ds_dir, 'data')}/*.csv")
static_df = pd.DataFrame()
for f in files:
_df = pd.read_csv(f, index_col='gauge_id')
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath)
if isinstance(stn_id, str):
station = [stn_id]
elif isinstance(stn_id, int):
station = [str(stn_id)]
elif isinstance(stn_id, list):
station = [str(stn) for stn in stn_id]
else:
raise ValueError
static_df.index = static_df.index.astype(str)
return static_df.loc[station][attributes]
class CAMELS_AUS(Camels):
"""
Inherits from Camels class. Reads CAMELS-AUS dataset of
`Fowler et al., 2020 <https://doi.org/10.5194/essd-13-3847-2021>`_
dataset.
Examples
--------
>>> from ai4water.datasets import CAMELS_AUS
>>> dataset = CAMELS_AUS()
>>> df = dataset.fetch(stations=1, as_dataframe=True)
>>> df = df.unstack() # the returned dataframe is a multi-indexed dataframe so we have to unstack it
>>> df.shape
(21184, 26)
... # get name of all stations as list
>>> stns = dataset.stations()
>>> len(stns)
222
... # get data by station id
>>> df = dataset.fetch(stations='224214A', as_dataframe=True).unstack()
>>> df.shape
(21184, 26)
... # get names of available dynamic features
>>> dataset.dynamic_features
... # get only selected dynamic features
>>> data = dataset.fetch(1, as_dataframe=True,
... dynamic_features=['tmax_AWAP', 'precipitation_AWAP', 'et_morton_actual_SILO', 'streamflow_MLd']).unstack()
>>> data.shape
(21184, 4)
... # get names of available static features
>>> dataset.static_features
... # get data of 10 random stations
>>> df = dataset.fetch(10, as_dataframe=True)
>>> df.shape # remember this is a multiindexed dataframe
(21184, 260)
"""
url = 'https://doi.pangaea.de/10.1594/PANGAEA.921850'
urls = {
"01_id_name_metadata.zip": "https://download.pangaea.de/dataset/921850/files/",
"02_location_boundary_area.zip": "https://download.pangaea.de/dataset/921850/files/",
"03_streamflow.zip": "https://download.pangaea.de/dataset/921850/files/",
"04_attributes.zip": "https://download.pangaea.de/dataset/921850/files/",
"05_hydrometeorology.zip": "https://download.pangaea.de/dataset/921850/files/",
"CAMELS_AUS_Attributes-Indices_MasterTable.csv": "https://download.pangaea.de/dataset/921850/files/",
"Units_01_TimeseriesData.pdf": "https://download.pangaea.de/dataset/921850/files/",
"Units_02_AttributeMasterTable.pdf": "https://download.pangaea.de/dataset/921850/files/",
}
folders = {
'streamflow_MLd': f'03_streamflow{SEP}03_streamflow{SEP}streamflow_MLd',
'streamflow_MLd_inclInfilled': f'03_streamflow{SEP}03_streamflow{SEP}streamflow_MLd_inclInfilled',
'streamflow_mmd': f'03_streamflow{SEP}03_streamflow{SEP}streamflow_mmd',
'et_morton_actual_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_morton_actual_SILO',
'et_morton_point_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_morton_point_SILO',
'et_morton_wet_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_morton_wet_SILO',
'et_short_crop_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_short_crop_SILO',
'et_tall_crop_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}et_tall_crop_SILO',
'evap_morton_lake_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}evap_morton_lake_SILO',
'evap_pan_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}evap_pan_SILO',
'evap_syn_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}02_EvaporativeDemand_timeseries{SEP}evap_syn_SILO',
'precipitation_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}01_precipitation_timeseries{SEP}precipitation_AWAP',
'precipitation_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}01_precipitation_timeseries{SEP}precipitation_SILO',
'precipitation_var_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}01_precipitation_timeseries{SEP}precipitation_var_AWAP',
'solarrad_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}AWAP{SEP}solarrad_AWAP',
'tmax_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}AWAP{SEP}tmax_AWAP',
'tmin_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}AWAP{SEP}tmin_AWAP',
'vprp_AWAP': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}AWAP{SEP}vprp_AWAP',
'mslp_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}mslp_SILO',
'radiation_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}radiation_SILO',
'rh_tmax_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}rh_tmax_SILO',
'rh_tmin_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}rh_tmin_SILO',
'tmax_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}tmax_SILO',
'tmin_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}tmin_SILO',
'vp_deficit_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}vp_deficit_SILO',
'vp_SILO': f'05_hydrometeorology{SEP}05_hydrometeorology{SEP}03_Other{SEP}SILO{SEP}vp_SILO',
}
def __init__(self, path: str = None):
"""
Arguments:
path: path where the CAMELS-AUS dataset has been downloaded. This path
must contain the five zip files and the attributes master table csv file.
If None, then the data will be downloaded.
"""
if path is not None:
assert isinstance(path, str), f'path must be string like but it is "{path}" of type {path.__class__.__name__}'
if not os.path.exists(path) or len(os.listdir(path)) < 2:
raise FileNotFoundError(f"The path {path} does not exist or is empty")
self.ds_dir = path
super().__init__(path=path)
if not os.path.exists(self.ds_dir):
os.makedirs(self.ds_dir)
for _file, url in self.urls.items():
fpath = os.path.join(self.ds_dir, _file)
if not os.path.exists(fpath):
download(url + _file, fpath)
_unzip(self.ds_dir)
self._maybe_to_netcdf('camels_aus_dyn')
@property
def start(self):
return "19570101"
@property
def end(self):
return "20181231"
@property
def location(self):
return "Australia"
def stations(self, as_list=True) -> list:
fname = os.path.join(self.ds_dir, f"01_id_name_metadata{SEP}01_id_name_metadata{SEP}id_name_metadata.csv")
df = pd.read_csv(fname)
if as_list:
return df['station_id'].to_list()
else:
return df
@property
def static_attribute_categories(self):
attributes = []
path = os.path.join(self.ds_dir, f'04_attributes{SEP}04_attributes')
for f in os.listdir(path):
if os.path.isfile(os.path.join(path, f)) and f.endswith('csv'):
f = str(f.split('.csv')[0])
attributes.append(''.join(f.split('_')[2:]))
return attributes
@property
def static_features(self) -> list:
static_fpath = os.path.join(self.ds_dir, 'static_features.csv')
if not os.path.exists(static_fpath):
files = glob.glob(f"{os.path.join(self.ds_dir, '04_attributes', '04_attributes')}/*.csv")
cols = []
for f in files:
_df = pd.read_csv(f, index_col='station_id', nrows=1)
cols += list(_df.columns)
else:
df = pd.read_csv(static_fpath, index_col='station_id', nrows=1)
cols = list(df.columns)
return cols
@property
def dynamic_features(self) -> list:
return list(self.folders.keys())
def _read_static(self, stations, attributes,
st=None, en=None):
attributes = check_attributes(attributes, self.static_features)
static_fname = 'static_features.csv'
static_fpath = os.path.join(self.ds_dir, static_fname)
if os.path.exists(static_fpath):
static_df = pd.read_csv(static_fpath, index_col='station_id')
else:
files = glob.glob(f"{os.path.join(self.ds_dir, '04_attributes', '04_attributes')}/*.csv")
static_df = pd.DataFrame()
for f in files:
_df = pd.read_csv(f, index_col='station_id')
static_df = pd.concat([static_df, _df], axis=1)
static_df.to_csv(static_fpath)
static_df.index = static_df.index.astype(str)
df = static_df.loc[stations][attributes]
if isinstance(df, pd.Series):
df = pd.DataFrame(df).transpose()
return self.to_ts(df, st, en)
def _read_dynamic_from_csv(self, stations, dynamic_features, **kwargs):
dyn_attrs = {}
dyn = {}
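# each csv file holds one dynamic attribute for all stations
# (stations as columns), so every required file is read only once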
for _attr in dynamic_features:
_path = os.path.join(self.ds_dir, f'{self.folders[_attr]}.csv')
_df = pd.read_csv(_path, na_values=['-99.99'])
_df.index = pd.to_datetime(_df[['year', 'month', 'day']])
[_df.pop(col) for col in ['year', 'month', 'day']]
dyn_attrs[_attr] = _df
# making one separate dataframe for one station
for stn in stations:
stn_df = pd.DataFrame()
for attr, attr_df in dyn_attrs.items():
if attr in dynamic_features:
stn_df[attr] = attr_df[stn]
dyn[stn] = stn_df
return dyn
def fetch_static_features(
self,
stn_id: Union[str, List[str]],
features:Union[str, List[str]]="all",
**kwargs
) -> pd.DataFrame:
"""Fetches static attribuets of one or more stations as dataframe.
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
static features are returned.
Examples
---------
>>> from ai4water.datasets import CAMELS_AUS
>>> dataset = CAMELS_AUS()
get the names of stations
>>> stns = dataset.stations()
>>> len(stns)
222
get all static data of all stations
>>> static_data = dataset.fetch_static_features(stns)
>>> static_data.shape
(222, 110)
get static data of one station only
>>> static_data = dataset.fetch_static_features('305202')
>>> static_data.shape
(1, 110)
get the names of static features
>>> dataset.static_features
get only selected features of all stations
>>> static_data = dataset.fetch_static_features(stns, ['catchment_di', 'elev_mean'])
>>> static_data.shape
(222, 2)
"""
return self._read_static(stn_id, features)
def plot(self, what, stations=None, **kwargs):
assert what in ['outlets', 'boundaries']
f1 = os.path.join(self.ds_dir,
f'02_location_boundary_area{SEP}02_location_boundary_area{SEP}shp{SEP}CAMELS_AUS_BasinOutlets_adopted.shp')
f2 = os.path.join(self.ds_dir,
f'02_location_boundary_area{SEP}02_location_boundary_area{SEP}shp{SEP}bonus data{SEP}Australia_boundaries.shp')
if plot_shapefile is not None:
return plot_shapefile(f1, bbox_shp=f2, recs=stations, rec_idx=0, **kwargs)
else:
raise ModuleNotFoundError("Shapely must be installed in order to plot the datasets.")
class CAMELS_CL(Camels):
"""
Downloads and processes CAMELS dataset of Chile following the work of
Alvarez-Garreton_ et al., 2018 .
Examples
---------
>>> from ai4water.datasets import CAMELS_CL
>>> dataset = CAMELS_CL()
>>> df = dataset.fetch(stations=1, as_dataframe=True)
>>> df = df.unstack() # the returned dataframe is a multi-indexed dataframe so we have to unstack it
>>> df.shape
(38374, 12)
# get name of all stations as list
>>> stns = dataset.stations()
>>> len(stns)
516
# get data by station id
>>> df = dataset.fetch(stations='11130001', as_dataframe=True).unstack()
>>> df.shape
(38374, 12)
# get names of available dynamic features
>>> dataset.dynamic_features
# get only selected dynamic features
>>> df = dataset.fetch(1, as_dataframe=True,
... dynamic_features=['pet_hargreaves', 'precip_tmpa', 'tmean_cr2met', 'streamflow_m3s']).unstack()
>>> df.shape
(38374, 4)
# get names of available static features
>>> dataset.static_features
# get data of 10 random stations
>>> df = dataset.fetch(10, as_dataframe=True)
>>> df.shape
(460488, 10)
.. _Alvarez-Garreton: https://doi.org/10.5194/hess-22-5817-2018
"""
urls = {
"1_CAMELScl_attributes.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"2_CAMELScl_streamflow_m3s.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"3_CAMELScl_streamflow_mm.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"4_CAMELScl_precip_cr2met.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"5_CAMELScl_precip_chirps.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"6_CAMELScl_precip_mswep.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"7_CAMELScl_precip_tmpa.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"8_CAMELScl_tmin_cr2met.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"9_CAMELScl_tmax_cr2met.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"10_CAMELScl_tmean_cr2met.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"11_CAMELScl_pet_8d_modis.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"12_CAMELScl_pet_hargreaves.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"13_CAMELScl_swe.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"14_CAMELScl_catch_hierarchy.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
"CAMELScl_catchment_boundaries.zip": "https://store.pangaea.de/Publications/Alvarez-Garreton-etal_2018/",
}
dynamic_features = ['streamflow_m3s', 'streamflow_mm',
'precip_cr2met', 'precip_chirps', 'precip_mswep', 'precip_tmpa',
'tmin_cr2met', 'tmax_cr2met', 'tmean_cr2met',
'pet_8d_modis', 'pet_hargreaves',
'swe'
]
def __init__(self,
path: str = None
):
"""
Arguments:
path: path where the CAMELS-CL dataset has been downloaded. This path must
contain the downloaded zip files.
"""
super().__init__(path=path)
self.ds_dir = path
if not os.path.exists(self.ds_dir):
os.makedirs(self.ds_dir)
for _file, url in self.urls.items():
fpath = os.path.join(self.ds_dir, _file)
if not os.path.exists(fpath):
download(url + _file, fpath)
_unzip(self.ds_dir)
self.dyn_fname = os.path.join(self.ds_dir, 'camels_cl_dyn.nc')
self._maybe_to_netcdf('camels_cl_dyn')
@property
def _all_dirs(self):
"""All the folders in the dataset_directory"""
return [f for f in os.listdir(self.ds_dir) if os.path.isdir(os.path.join(self.ds_dir, f))]
@property
def start(self):
return "19130215"
@property
def end(self):
return "20180309"
@property
def static_features(self) -> list:
path = os.path.join(self.ds_dir, f"1_CAMELScl_attributes{SEP}1_CAMELScl_attributes.txt")
df = pd.read_csv(path, sep='\t', index_col='gauge_id')
return df.index.to_list()
def stations(self) -> list:
"""
Returns ids of stations for which data of all dynamic attributes is available.
"""
stn_fname = os.path.join(self.ds_dir, 'stations.json')
if not os.path.exists(stn_fname):
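# determine the stations common to all dynamic attributes once and
# cache them as json so the text files are not parsed on every call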
_stations = {}
for dyn_attr in self.dynamic_features:
for _dir in self._all_dirs:
if dyn_attr in _dir:
fname = os.path.join(self.ds_dir, f"{_dir}{SEP}{_dir}.txt")
df = pd.read_csv(fname, sep='\t', nrows=2, index_col='gauge_id')
_stations[dyn_attr] = list(df.columns)
stns = list(set.intersection(*map(set, list(_stations.values()))))
with open(stn_fname, 'w') as fp:
json.dump(stns, fp)
else:
with open(stn_fname, 'r') as fp:
stns = json.load(fp)
return stns
def _read_dynamic_from_csv(self, stations, dynamic_features, st=None, en=None):
dyn = {}
st, en = self._check_length(st, en)
assert all(stn in self.stations() for stn in stations)
dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
# reading all dynamic attributes
dyn_attrs = {}
for attr in dynamic_features:
fname = [f for f in self._all_dirs if '_' + attr in f][0]
fname = os.path.join(self.ds_dir, f'{fname}{SEP}{fname}.txt')
_df = pd.read_csv(fname, sep='\t', index_col=['gauge_id'], na_values=" ")
_df.index = pd.to_datetime(_df.index)
dyn_attrs[attr] = _df[st:en]
# making one separate dataframe for one station
for stn in stations:
stn_df = pd.DataFrame()
for attr, attr_df in dyn_attrs.items():
if attr in dynamic_features:
stn_df[attr] = attr_df[stn]
dyn[stn] = stn_df[st:en]
return dyn
def _read_static(self, stations: list, attributes: list) -> pd.DataFrame:
# overridden for speed
path = os.path.join(self.ds_dir, f"1_CAMELScl_attributes{SEP}1_CAMELScl_attributes.txt")
_df = pd.read_csv(path, sep='\t', index_col='gauge_id')
stns_df = []
for stn in stations:
df = pd.DataFrame()
if stn in _df:
df[stn] = _df[stn]
elif ' ' + stn in _df:
df[stn] = _df[' ' + stn]
stns_df.append(df.transpose()[attributes])
stns_df = pd.concat(stns_df)
return stns_df
def fetch_static_features(
self,
stn_id: Union[str, List[str]],
features:Union[str, List[str]]=None
):
"""
Returns static features of one or more stations.
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
static features are returned.
Examples
---------
>>> from ai4water.datasets import CAMELS_CL
>>> dataset = CAMELS_CL()
get the names of stations
>>> stns = dataset.stations()
>>> len(stns)
516
get all static data of all stations
>>> static_data = dataset.fetch_static_features(stns)
>>> static_data.shape
(516, 104)
get static data of one station only
>>> static_data = dataset.fetch_static_features('11315001')
>>> static_data.shape
(1, 104)
get the names of static features
>>> dataset.static_features
get only selected features of all stations
>>> static_data = dataset.fetch_static_features(stns, ['slope_mean', 'area'])
>>> static_data.shape
(516, 2)
>>> data = dataset.fetch_static_features('2110002', features=['slope_mean', 'area'])
>>> data.shape
(1, 2)
"""
attributes = check_attributes(features, self.static_features)
if isinstance(stn_id, str):
stn_id = [stn_id]
return self._read_static(stn_id, attributes)
class HYPE(Camels):
"""
Downloads and preprocesses HYPE [1]_ dataset from Lindstroem et al., 2010 [2]_ .
This is a rainfall-runoff dataset of Sweden of 564 stations from 1985 to
2019 at daily, monthly and yearly time steps.
Examples
--------
>>> from ai4water.datasets import HYPE
>>> dataset = HYPE()
... # get data of 5% of stations
>>> df = dataset.fetch(stations=0.05, as_dataframe=True) # returns a multiindex dataframe
>>> df.shape
(115047, 28)
... # fetch data of 5 (randomly selected) stations
>>> df = dataset.fetch(stations=5, as_dataframe=True)
>>> df.shape
(115047, 5)
fetch data of 3 selected stations
>>> df = dataset.fetch(stations=['564','563','562'], as_dataframe=True)
>>> df.shape
(115047, 3)
... # fetch data of a single stations
>>> df = dataset.fetch(stations='500', as_dataframe=True)
(115047, 1)
# get only selected dynamic features
>>> df = dataset.fetch(stations='501',
... dynamic_features=['AET_mm', 'Prec_mm', 'Streamflow_mm'], as_dataframe=True)
# fetch data between selected periods
>>> df = dataset.fetch(stations='225', st="20010101", en="20101231", as_dataframe=True)
>>> df.shape
(32868, 1)
... # get data at monthly time step
>>> dataset = HYPE(time_step="month")
>>> df = dataset.fetch(stations='500', as_dataframe=True)
>>> df.shape
(3780, 1)
.. [1] https://zenodo.org/record/4029572
.. [2] https://doi.org/10.2166/nh.2010.007
"""
url = [
"https://zenodo.org/record/581435",
"https://zenodo.org/record/4029572"
]
dynamic_features = [
'AET_mm',
'Baseflow_mm',
'Infiltration_mm',
'SM_mm',
'Streamflow_mm',
'Runoff_mm',
'Qsim_m3-s',
'Prec_mm',
'PET_mm'
]
def __init__(self, time_step: str = 'daily', path = None, **kwargs):
"""
Parameters
----------
time_step : str
one of ``daily``, ``month`` or ``year``
**kwargs
key word arguments
"""
assert time_step in ['daily', 'month', 'year']
self.time_step = time_step
self.ds_dir = path
super().__init__(path=path, **kwargs)
self._download()
fpath = os.path.join(self.ds_dir, 'hype_year_dyn.nc')
if not os.path.exists(fpath):
self.time_step = 'daily'
self._maybe_to_netcdf('hype_daily_dyn')
self.time_step = 'month'
self._maybe_to_netcdf('hype_month_dyn')
self.time_step = 'year'
self._maybe_to_netcdf('hype_year_dyn')
self.time_step = time_step
self.dyn_fname = os.path.join(self.ds_dir, f'hype_{time_step}_dyn.nc')
def stations(self) -> list:
_stations = np.arange(1, 565).astype(str)
return list(_stations)
@property
def static_features(self):
return []
def _read_dynamic_from_csv(self,
stations: list,
attributes: Union[str, list] = 'all',
st=None,
en=None,
):
dynamic_features = check_attributes(attributes, self.dynamic_features)
_dynamic_attributes = []
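# on disk the files are named as {prefix}_{time_step}_{suffix}.csv
# e.g. AET_daily_mm.csv, so inject the time_step into the feature name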
for dyn_attr in dynamic_features:
pref, suff = dyn_attr.split('_')[0], dyn_attr.split('_')[-1]
_dyn_attr = f"{pref}_{self.time_step}_{suff}"
_dynamic_attributes.append(_dyn_attr)
df_attrs = {}
for dyn_attr in _dynamic_attributes:
fname = f"{dyn_attr}.csv"
fpath = os.path.join(self.ds_dir, fname)
index_col_name = 'DATE'
if fname in ['SM_month_mm.csv', 'SM_year_mm.csv']:
index_col_name = 'Date'
_df = pd.read_csv(fpath, index_col=index_col_name)
_df.index = pd.to_datetime(_df.index)
# todo, some stations have wider range than self.st/self.en
df_attrs[dyn_attr] = _df.loc[self.start:self.end]
stns_dfs = {}
for st in stations:
stn_dfs = []
cols = []
for dyn_attr, dyn_df in df_attrs.items():
stn_dfs.append(dyn_df[st])
col_name = f"{dyn_attr.split('_')[0]}_{dyn_attr.split('_')[-1]}" # get original name without time_step
cols.append(col_name)
stn_df = pd.concat(stn_dfs, axis=1)
stn_df.columns = cols
stns_dfs[st] = stn_df
return stns_dfs
def fetch_static_features(self, stn_id, features=None):
"""static data for HYPE is not available."""
raise ValueError(f'No static feature for {self.name}')
@property
def start(self):
return '19850101'
@property
def end(self):
return '20191231'
class WaterBenchIowa(Camels):
"""
Rainfall run-off dataset for Iowa (US) following the work of
`Demir et al., 2022 <https://doi.org/10.5194/essd-14-5605-2022>`_
Examples
--------
>>> from ai4water.datasets import WaterBenchIowa
>>> ds = WaterBenchIowa()
... # fetch static and dynamic features of 5 stations
>>> data = ds.fetch(5, as_dataframe=True)
>>> data.shape # it is a multi-indexed DataFrame
(184032, 5)
... # fetch both static and dynamic features of 5 stations
>>> data = ds.fetch(5, static_features="all", as_dataframe=True)
>>> data.keys()
dict_keys(['dynamic', 'static'])
>>> data['static'].shape
(5, 7)
>>> data['dynamic'] # returns a xarray DataSet
... # using another method
>>> data = ds.fetch_dynamic_features('644', as_dataframe=True)
>>> data.unstack().shape
(61344, 3)
"""
url = "https://zenodo.org/record/7087806#.Y6rW-BVByUk"
def __init__(self, path=None):
super(WaterBenchIowa, self).__init__(path=path)
self._download()
self._maybe_to_netcdf('WaterBenchIowa.nc')
def stations(self)->List[str]:
return [fname.split('_')[0] for fname in os.listdir(self.ts_path) if fname.endswith('.csv')]
@property
def ts_path(self)->str:
return os.path.join(self.ds_dir, 'data_time_series', 'data_time_series')
@property
def dynamic_features(self) -> List[str]:
return ['precipitation', 'et', 'discharge']
@property
def static_features(self)->List[str]:
return ['travel_time', 'area', 'slope', 'loam', 'silt',
'sandy_clay_loam', 'silty_clay_loam']
def fetch_station_attributes(
self,
station: str,
dynamic_features: Union[str, list, None] = 'all',
static_features: Union[str, list, None] = None,
as_ts: bool = False,
st: Union[str, None] = None,
en: Union[str, None] = None,
**kwargs
) -> pd.DataFrame:
"""
Examples
--------
>>> from ai4water.datasets import WaterBenchIowa
>>> dataset = WaterBenchIowa()
>>> data = dataset.fetch_station_attributes('666')
"""
        dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
        fname = os.path.join(self.ts_path, f"{station}_data.csv")
        df = pd.read_csv(fname)
        df.index = pd.to_datetime(df.pop('datetime'))
        # return only the requested dynamic features instead of every column
        return df[dynamic_features]
def fetch_static_features(
self,
stn_id: Union[str, List[str]],
features:Union[str, List[str]]=None
)->pd.DataFrame:
"""
Parameters
----------
stn_id : str
name/id of station of which to extract the data
features : list/str, optional (default="all")
The name/names of features to fetch. By default, all available
static features are returned.
Examples
---------
>>> from ai4water.datasets import WaterBenchIowa
>>> dataset = WaterBenchIowa()
get the names of stations
>>> stns = dataset.stations()
>>> len(stns)
125
get all static data of all stations
>>> static_data = dataset.fetch_static_features(stns)
>>> static_data.shape
(125, 7)
get static data of one station only
>>> static_data = dataset.fetch_static_features('592')
>>> static_data.shape
(1, 7)
get the names of static features
>>> dataset.static_features
get only selected features of all stations
>>> static_data = dataset.fetch_static_features(stns, ['slope', 'area'])
>>> static_data.shape
(125, 2)
>>> data = dataset.fetch_static_features('592', features=['slope', 'area'])
>>> data.shape
(1, 2)
"""
if not isinstance(stn_id, list):
stn_id = [stn_id]
features = check_attributes(features, self.static_features)
dfs = []
for stn in stn_id:
fname = os.path.join(self.ts_path, f"{stn}_data.csv")
df = pd.read_csv(fname, nrows=1)
dfs.append(df[features])
return pd.concat(dfs)
def _read_dynamic_from_csv(
self,
stations,
dynamic_features,
st=None,
en=None)->dict:
        dyn = dict()
        # honour the requested subset of dynamic features instead of always
        # returning all of them
        dynamic_features = check_attributes(dynamic_features, self.dynamic_features)
        for stn in stations:
            fname = os.path.join(self.ts_path, f"{stn}_data.csv")
            df = pd.read_csv(fname)
            df.index = pd.to_datetime(df.pop('datetime'))
            dyn[stn] = df[dynamic_features]
return dyn
@property
def start(self):
return "20111001 12:00"
@property
def end(self):
return "20180930 11:00" | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/camels.py | camels.py |
__all__ = ["RC4USCoast"]
from typing import Union, List
import numpy as np
from ai4water.backend import pd, os, xr
from ._datasets import Datasets
from .utils import check_st_en
class RC4USCoast(Datasets):
"""
Monthly river water chemistry (N, P, SIO2, DO, ... etc), discharge and temperature of 140
monitoring sites of US coasts from 1950 to 2020 following the work of
`Gomez et al., 2022 <https://doi.org/10.5194/essd-2022-341>`_.
Examples
--------
>>> from ai4water.datasets import RC4USCoast
>>> dataset = RC4USCoast()
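    ... # a minimal sketch; assumes the data has already been downloaded
    >>> q = dataset.fetch_q("all")  # monthly discharge of all stations
    >>> chem = dataset.fetch_chem(['temp', 'do'])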
"""
url = {
'RC4USCoast.zip':
'https://www.ncei.noaa.gov/data/oceans/ncei/ocads/data/0260455/RC4USCoast.zip',
'info.xlsx':
'https://www.ncei.noaa.gov/data/oceans/ncei/ocads/data/0260455/supplemental/dataset_info.xlsx'
}
def __init__(self, path=None, *args, **kwargs):
"""
Parameters
----------
path :
path where the data is already downloaded. If None, the data will
be downloaded into the disk.
"""
super(RC4USCoast, self).__init__(path=path, *args, **kwargs)
self.ds_dir = path
self._download()
@property
def chem_fname(self)->str:
return os.path.join(self.ds_dir, "RC4USCoast", "series_chem.nc")
@property
def q_fname(self) -> str:
return os.path.join(self.ds_dir, "RC4USCoast", "series_disc.nc")
@property
def info_fname(self) -> str:
return os.path.join(self.ds_dir, "RC4USCoast", "info.xlsx")
@property
def stations(self)->np.ndarray:
"""
>>> from ai4water.datasets import RC4USCoast
        >>> ds = RC4USCoast()
>>> len(ds.stations)
140
"""
return xr.load_dataset(self.q_fname).RC4USCoast_ID.data
@property
def parameters(self)->List[str]:
"""
>>> from ai4water.datasets import RC4USCoast
>>> ds = RC4USCoast()
>>> len(ds.parameters)
27
"""
df = xr.load_dataset(self.chem_fname)
return list(df.data_vars.keys())
@property
def start(self)->pd.Timestamp:
return pd.Timestamp(xr.load_dataset(self.q_fname).time.data[0])
@property
def end(self)->pd.Timestamp:
return pd.Timestamp(xr.load_dataset(self.q_fname).time.data[-1])
def fetch_chem(
self,
parameter,
stations: Union[List[int], int, str] = "all",
as_dataframe:bool = False,
st: Union[int, str, pd.DatetimeIndex] = None,
en: Union[int, str, pd.DatetimeIndex] = None,
):
"""
Returns water chemistry parameters from one or more stations.
        Parameters
----------
parameter : list, str
name/names of parameters to fetch
stations : list, str
name/names of stations from which the parameters are to be fetched
as_dataframe : bool (default=False)
whether to return data as pandas.DataFrame or xarray.Dataset
st :
start time of data to be fetched. The default starting
date is 19500101
en :
end time of data to be fetched. The default end date is
20201201
Returns
-------
pandas DataFrame or xarray Dataset
Examples
--------
>>> from ai4water.datasets import RC4USCoast
>>> ds = RC4USCoast()
>>> data = ds.fetch_chem(['temp', 'do'])
>>> data
>>> data = ds.fetch_chem(['temp', 'do'], as_dataframe=True)
>>> data.shape # this is a multi-indexed dataframe
(119280, 4)
>>> data = ds.fetch_chem(['temp', 'do'], st="19800101", en="20181230")
"""
if isinstance(parameter, str):
parameter = [parameter]
ds = xr.load_dataset(self.chem_fname)[parameter]
if stations == "all":
pass
elif not isinstance(stations, list):
stations = [stations]
ds = ds.sel(RC4USCoast_ID=stations)
elif isinstance(stations, list):
ds = ds.sel(RC4USCoast_ID = stations)
else:
assert stations is None
ds = ds.sel(time=slice(st or self.start, en or self.end))
if as_dataframe:
return ds.to_dataframe()
return ds
def fetch_q(
self,
stations:Union[int, List[int], str, np.ndarray] = "all",
as_dataframe:bool=True,
nv=0,
st: Union[int, str, pd.DatetimeIndex] = None,
en: Union[int, str, pd.DatetimeIndex] = None,
):
"""returns discharge data
        Parameters
-----------
stations :
stations for which q is to be fetched
as_dataframe : bool (default=True)
whether to return the data as pd.DataFrame or as xarray.Dataset
        nv : int (default=0)
            value used to select along the ``nv`` dimension of the netCDF file
st :
start time of data to be fetched. The default starting
date is 19500101
en :
end time of data to be fetched. The default end date is
20201201
Examples
--------
>>> from ai4water.datasets import RC4USCoast
>>> ds = RC4USCoast()
# get data of all stations as DataFrame
>>> q = ds.fetch_q("all")
>>> q.shape
(852, 140) # where 140 is the number of stations
# get data of only two stations
>>> q = ds.fetch_q([1,10])
>>> q.shape
(852, 2)
# get data as xarray Dataset
>>> q = ds.fetch_q("all", as_dataframe=False)
>>> type(q)
xarray.core.dataset.Dataset
# getting data between specific periods
>>> data = ds.fetch_q("all", st="20000101", en="20181230")
"""
q = xr.load_dataset(self.q_fname)
if stations:
if stations == "all":
q = q.sel(nv=nv)
elif not isinstance(stations, list):
stations = [stations]
q = q.sel(RC4USCoast_ID=stations, nv=nv)
elif isinstance(stations, list):
q = q.sel(RC4USCoast_ID=stations, nv=nv)
else:
raise ValueError(f"invalid {stations}")
q = q.sel(time=slice(st or self.start, en or self.end))
if as_dataframe:
return q.to_dataframe()['disc'].unstack()
return q | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/_rc4uscoast.py | _rc4uscoast.py |
__all__ = ["NPCTRCatchments"]
from typing import Union, List, Tuple
from ai4water.backend import pd, os, np
from ._datasets import Datasets
from .utils import check_attributes, sanity_check, check_st_en
class NPCTRCatchments(Datasets):
"""
High-resolution streamflow and weather data (2013–2019) for seven small coastal
watersheds in the northeast Pacific coastal temperate rainforest, Canada following
`Korver et al., 2022 <https://doi.org/10.5194/essd-14-4231-2022>`_
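    Examples
    --------
    a minimal usage sketch; the data is downloaded on first use:
    >>> from ai4water.datasets import NPCTRCatchments
    >>> dataset = NPCTRCatchments()
    >>> dataset.stations
    ['626', '693', '703', '708', '819', '844', '1015']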
"""
url = {
"2013-2019_Discharge1015_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Discharge1015_5min.csv",
"2013-2019_Discharge626_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Discharge626_5min.csv",
"2013-2019_Discharge693_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Discharge693_5min.csv",
"2013-2019_Discharge703_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Discharge703_5min.csv",
"2013-2019_Discharge708_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Discharge708_5min.csv",
"2013-2019_Discharge819_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Discharge819_5min.csv",
"2013-2019_Discharge844_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Discharge844_5min.csv",
"2013-2019_Discharge_Hourly.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Discharge_Hourly.csv",
"2013-2019_RH_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_RH_5min.csv",
"2013-2019_RH_Hourly.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_RH_Hourly.csv",
"2013-2019_Rad_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Rad_5min.csv",
"2013-2019_Rad_Hourly.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Rad_Hourly.csv",
"2013-2019_Rain_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Rain_5min.csv",
"2013-2019_Rain_Hourly.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Rain_Hourly.csv",
"2013-2019_SnowDepth_Hourly.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_SnowDepth_Hourly.csv",
"2013-2019_Ta_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Ta_5min.csv",
"2013-2019_Ta_Hourly.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_Ta_Hourly.csv",
"2013-2019_WindDir_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_WindDir_5min.csv",
"2013-2019_WindDir_Hourly.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_WindDir_Hourly.csv",
"2013-2019_WindSpd_5min.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_WindSpd_5min.csv",
"2013-2019_WindSpd_Hourly.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/2013-2019_WindSpd_Hourly.csv",
"Data-Dictonary.csv":
"https://media.githubusercontent.com/media/HakaiInstitute/essd2021-hydromet-datapackage/main/Data-Dictonary.csv",
}
def __init__(self, path=None, **kwargs):
super().__init__(path=path, **kwargs)
self.ds_dir = path
self._download()
@property
def stations(self)->List[str]:
return ["626", "693", "703", "708", "819", "844", "1015"]
def fetch_wind_speed(
self,
station,
timestep,
):
_verify_timestep(timestep)
        # reading of wind speed files is not implemented yet
        raise NotImplementedError
@property
def q_attributes(self):
return ["Qrate", "Qrate_min", "Qrate_max", "Qvol", "Qvol_min", "Qvol_max",
"Qmm", "Qmm_min", "Qmm_max"]
def fetch_q(
self,
station:Union[str, List[str]],
timestep:str,
):
"""
        Parameters
        ----------
        station :
            name/names of station(s) whose discharge is to be fetched
        timestep :
            temporal resolution of the data, either ``Hourly`` or ``5min``
Examples
---------
>>> from ai4water.datasets import NPCTRCatchments
>>> dataset = NPCTRCatchments()
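        ... # a sketch of a call; '703' is one of the ids returned by dataset.stations
        >>> q = dataset.fetch_q("703", timestep="Hourly")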
"""
        _verify_timestep(timestep)
        stations = check_attributes(station, self.stations)
        # NOTE: the original implementation built the dataframes but returned
        # nothing; returning a dictionary of station -> DataFrame is an assumption.
        stn_dfs = {}
        if timestep == "Hourly":
            # hourly discharge of all watersheds is stored in a single file
            fname = f"2013-2019_Discharge_{timestep}.csv"
            df = pd.read_csv(os.path.join(self.ds_dir, fname))
            for gname, grp in df.groupby('Watershed'):
                # watershed names carry a 3-character prefix before the station id
                if gname[3:] not in stations:
                    continue
                grp.index = pd.to_datetime(grp.pop('Datetime'))
                grp = grp.resample('H').interpolate(method='linear')
                stn_dfs[gname[3:]] = grp
        else:
            # 5min discharge is stored in one file per station
            for stn in stations:
                fname = f"2013-2019_Discharge{stn}_{timestep}.csv"
                df = pd.read_csv(os.path.join(self.ds_dir, fname))
                stn_dfs[stn] = df
        return stn_dfs
def fetch_pcp(
self,
station,
time_step,
):
        # reading of precipitation files is not implemented yet
        raise NotImplementedError
def fetch_temp(
self,
station,
time_step,
):
        # reading of temperature files is not implemented yet
        raise NotImplementedError
def fetch_rel_hum(
self,
station,
time_step,
):
        # reading of relative humidity files is not implemented yet
        raise NotImplementedError
def _verify_timestep(timestep):
assert timestep in ["Hourly", "5min"], f"""
timestep must be either Hourly or 5min but it is {timestep}
"""
return | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/_npctr.py | _npctr.py |
import os
import sys
import ssl
import glob
import shutil
import zipfile
import tempfile
from typing import Union, List
import urllib.request as ulib
import urllib.parse as urlparse
from ai4water.backend import pd, np
try:
import requests
except ModuleNotFoundError:
requests = None
# following files must exist withing data folder for CAMELS-GB data
DATA_FILES = {
'CAMELS-GB': [
'CAMELS_GB_climatic_attributes.csv',
'CAMELS_GB_humaninfluence_attributes.csv',
'CAMELS_GB_hydrogeology_attributes.csv',
'CAMELS_GB_hydrologic_attributes.csv',
'CAMELS_GB_hydrometry_attributes.csv',
'CAMELS_GB_landcover_attributes.csv',
'CAMELS_GB_soil_attributes.csv',
'CAMELS_GB_topographic_attributes.csv'
],
'HYSETS': [ # following files must exist in a folder containing HYSETS dataset.
'HYSETS_2020_ERA5.nc',
'HYSETS_2020_ERA5Land.nc',
'HYSETS_2020_ERA5Land_SWE.nc',
'HYSETS_2020_Livneh.nc',
'HYSETS_2020_nonQC_stations.nc',
'HYSETS_2020_SCDNA.nc',
'HYSETS_2020_SNODAS_SWE.nc',
'HYSETS_elevation_bands_100m.csv',
'HYSETS_watershed_boundaries.zip',
'HYSETS_watershed_properties.txt'
]
}
def download_all_http_directory(url, outpath=None, filetypes=".zip", match_name=None):
"""
Download all the files which are of category filetypes at the location of
    outpath. If a file is already present, it will not be downloaded.
filetypes str: extension of files to be downloaded. By default only .zip files
are downloaded.
    match_name str: if not None, then only those files will be downloaded whose
    names contain the match_name string.
"""
try:
import bs4
except (ModuleNotFoundError, ImportError) as e:
        raise ImportError("You must install the bs4 library e.g. by using "
                          "pip install bs4") from e
if os.name == 'nt':
ssl._create_default_https_context = ssl._create_unverified_context
page = list(urlparse.urlsplit(url))[2].split('/')[-1]
basic_url = url.split(page)[0]
r = requests.get(url)
data = bs4.BeautifulSoup(r.text, "html.parser")
match_name = filetypes if match_name is None else match_name
for l in data.find_all("a"):
if l["href"].endswith(filetypes) and match_name in l['href']:
_outpath = outpath
if outpath is not None:
_outpath = os.path.join(outpath, l['href'])
if os.path.exists(_outpath):
print(f"file {l['href']} already exists at {outpath}")
continue
download(basic_url + l["href"], _outpath)
print(r.status_code, l["href"], )
return
def download(url, out=None):
"""High level function, which downloads URL into tmp file in current
directory and then renames it to filename autodetected from either URL
or HTTP headers.
:param url:
:param out: output filename or directory
:return: filename where URL is downloaded to
"""
    # detect if out is a directory
if out is not None:
outdir = os.path.dirname(out)
out_filename = os.path.basename(out)
if outdir == '':
outdir = os.getcwd()
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = os.getcwd()
out_filename = None
# get filename for temp file in current directory
prefix = filename_from_url(url)
(fd, tmpfile) = tempfile.mkstemp(".tmp", prefix=prefix, dir=".")
os.close(fd)
os.unlink(tmpfile)
# set progress monitoring callback
def callback_charged(blocks, block_size, total_size):
# 'closure' to set bar drawing function in callback
callback_progress(blocks, block_size, total_size, bar_function=bar)
callback = callback_charged
# Python 3 can not quote URL as needed
binurl = list(urlparse.urlsplit(url))
binurl[2] = urlparse.quote(binurl[2])
binurl = urlparse.urlunsplit(binurl)
(tmpfile, headers) = ulib.urlretrieve(binurl, tmpfile, callback)
filename = filename_from_url(url)
if out_filename:
filename = out_filename
filename = outdir + "/" + filename
# add numeric ' (x)' suffix if filename already exists
if os.path.exists(filename):
filename = filename + '1'
shutil.move(tmpfile, filename)
# print headers
return filename
__current_size = 0
def callback_progress(blocks, block_size, total_size, bar_function):
"""callback function for urlretrieve that is called when connection is
created and when once for each block
draws adaptive progress bar in terminal/console
use sys.stdout.write() instead of "print,", because it allows one more
symbol at the line end without linefeed on Windows
:param blocks: number of blocks transferred so far
:param block_size: in bytes
:param total_size: in bytes, can be -1 if server doesn't return it
:param bar_function: another callback function to visualize progress
"""
global __current_size
width = 100
if sys.version_info[:3] == (3, 3, 0): # regression workaround
if blocks == 0: # first call
__current_size = 0
else:
__current_size += block_size
current_size = __current_size
else:
current_size = min(blocks * block_size, total_size)
progress = bar_function(current_size, total_size, width)
if progress:
sys.stdout.write("\r" + progress)
def filename_from_url(url):
""":return: detected filename as unicode or None"""
# [ ] test urlparse behavior with unicode url
fname = os.path.basename(urlparse.urlparse(url).path)
if len(fname.strip(" \n\t.")) == 0:
return None
return fname
def bar(current_size, total_size, width):
percent = current_size/total_size * 100
if round(percent % 1, 4) == 0.0:
print(f"{round(percent)}% of {round(total_size*1e-6, 2)} MB downloaded")
return
def check_attributes(attributes, check_against: list) -> list:
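    """Checks that the requested ``attributes`` are among ``check_against`` and
    always returns them as a list. ``attributes`` may be 'all', None, a single
    string or a list of strings."""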
if attributes == 'all' or attributes is None:
attributes = check_against
elif not isinstance(attributes, list):
assert isinstance(attributes, str)
assert attributes in check_against
attributes = [attributes]
else:
assert isinstance(attributes, list), f'unknown attributes {attributes}'
assert all(elem in check_against for elem in attributes)
return attributes
def sanity_check(dataset_name, path, url=None):
if dataset_name in DATA_FILES:
if dataset_name == 'CAMELS-GB':
if not os.path.exists(os.path.join(path, 'data')):
raise FileNotFoundError(f"No folder named `data` exists inside {path}")
else:
data_path = os.path.join(path, 'data')
for file in DATA_FILES[dataset_name]:
if not os.path.exists(os.path.join(data_path, file)):
raise FileNotFoundError(f"File {file} must exist inside {data_path}")
_maybe_not_all_files_downloaded(path, url)
return
def _maybe_not_all_files_downloaded(
path:str,
url:Union[str, list, dict]
):
if isinstance(url, dict):
available_files = os.listdir(path)
for fname, link in url.items():
if fname not in available_files:
print(f"file {fname} is not available so downloading it now.")
download_and_unzip(path, {fname:link})
return
def check_st_en(
df:pd.DataFrame,
st:Union[int, str, pd.DatetimeIndex]=None,
en:Union[int, str, pd.DatetimeIndex]=None
)->pd.DataFrame:
"""slices the dataframe based upon st and en"""
if isinstance(st, int):
if en is None:
en = len(df)
else:
assert isinstance(en, int)
df = df.iloc[st:en]
elif isinstance(st, (str, pd.DatetimeIndex)):
if en is None:
en = df.index[-1]
df = df.loc[st:en]
elif isinstance(en, int):
st = 0 # st must be none here
df = df.iloc[st:en]
elif isinstance(en, (str, pd.DatetimeIndex)):
st = df.index[0]
df = df.loc[st:en]
return df
def unzip_all_in_dir(dir_name, ext=".gz"):
gz_files = glob.glob(f"{dir_name}/*{ext}")
for f in gz_files:
shutil.unpack_archive(f, dir_name)
return
def maybe_download(ds_dir,
url:Union[str, List[str], dict],
overwrite:bool=False,
name=None,
include:list=None,
**kwargs):
"""
Parameters
----------
ds_dir :
url :
overwrite :
name :
include :
**kwargs :
any keyword arguments for download_and_unzip function
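    Examples
    --------
    a hypothetical usage sketch; the path, url and name below are placeholders:
    >>> maybe_download('path/to/data',
    ...                url='https://zenodo.org/record/xxxxxxx',
    ...                name='my_dataset')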
"""
if os.path.exists(ds_dir) and len(os.listdir(ds_dir)) > 0:
if overwrite:
print(f"removing previous data directory {ds_dir} and downloading new")
shutil.rmtree(ds_dir)
download_and_unzip(ds_dir, url=url, include=include, **kwargs)
else:
print(f"""
Not downloading the data since the directory
{ds_dir} already exists.
Use overwrite=True to remove previously saved files and download again""")
sanity_check(name, ds_dir, url)
else:
download_and_unzip(ds_dir, url=url, include=include, **kwargs)
return
def download_and_unzip(path,
url:Union[str, List[str], dict],
include=None,
**kwargs):
"""
parameters
----------
path :
url :
include :
files to download. Files which are not in include will not be
downloaded.
**kwargs :
any keyword arguments for download_from_zenodo function
"""
from .download_zenodo import download_from_zenodo
if not os.path.exists(path):
os.makedirs(path)
if isinstance(url, str):
if 'zenodo' in url:
download_from_zenodo(path, doi=url, include=include, **kwargs)
else:
download(url, path)
_unzip(path)
elif isinstance(url, list):
for url in url:
if 'zenodo' in url:
download_from_zenodo(path, url, include=include, **kwargs)
else:
download(url, path)
_unzip(path)
elif isinstance(url, dict):
for fname, url in url.items():
if 'zenodo' in url:
download_from_zenodo(path, doi=url, include=include, **kwargs)
else:
download(url, os.path.join(path, fname))
_unzip(path)
else:
raise ValueError(f"Invalid url: {path}, {url}")
return
def _unzip(ds_dir, dirname=None):
"""unzip all the zipped files in a directory"""
if dirname is None:
dirname = ds_dir
all_files = glob.glob(f"{dirname}/*.zip")
for f in all_files:
src = os.path.join(dirname, f)
trgt = os.path.join(dirname, f.split('.zip')[0])
if not os.path.exists(trgt):
print(f"unzipping {src} to {trgt}")
with zipfile.ZipFile(os.path.join(dirname, f), 'r') as zip_ref:
try:
zip_ref.extractall(os.path.join(dirname, f.split('.zip')[0]))
except OSError:
filelist = zip_ref.filelist
for _file in filelist:
if '.txt' in _file.filename or '.csv' in _file.filename or '.xlsx' in _file.filename:
zip_ref.extract(_file)
    # extracting tar.gz files. TODO: check if zip files can also be unpacked by the following one-liner
gz_files = glob.glob(f"{ds_dir}/*.gz")
for f in gz_files:
shutil.unpack_archive(f, ds_dir)
return
class OneHotEncoder(object):
"""
>>> from ai4water.datasets import mg_photodegradation
>>> data, _, _ = mg_photodegradation()
>>> cat_enc1 = OneHotEncoder()
>>> cat_ = cat_enc1.fit_transform(data['Catalyst_type'].values)
>>> _cat = cat_enc1.inverse_transform(cat_)
>>> all([a==b for a,b in zip(data['Catalyst_type'].values, _cat)])
"""
def fit(self, X:np.ndarray):
assert len(X) == X.size
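        # integer-encode the unique categories and build the one-hot matrix by
        # indexing an identity matrix with those integer codes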
categories, inverse = np.unique(X, return_inverse=True)
X = np.eye(categories.shape[0])[inverse]
self.categories_ = [categories]
return X
def transform(self, X):
return X
def fit_transform(self, X):
return self.transform(self.fit(X))
def inverse_transform(self, X):
return pd.DataFrame(X, columns=self.categories_[0]).idxmax(1).values
class LabelEncoder(object):
"""
>>> from ai4water.datasets import mg_photodegradation
>>> data, _, _ = mg_photodegradation()
>>> cat_enc1 = LabelEncoder()
>>> cat_ = cat_enc1.fit_transform(data['Catalyst_type'].values)
>>> _cat = cat_enc1.inverse_transform(cat_)
>>> all([a==b for a,b in zip(data['Catalyst_type'].values, _cat)])
"""
def fit(self, X):
assert len(X) == X.size
categories, inverse = np.unique(X, return_inverse=True)
self.categories_ = [categories]
labels = np.unique(inverse)
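        # keep an integer-label -> original-category mapping so that
        # inverse_transform can recover the original values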
self.mapper_ = {label:category for category,label in zip(categories, labels)}
return inverse
def transform(self, X):
return X
def fit_transform(self, X):
return self.transform(self.fit(X))
def inverse_transform(self, X:np.ndarray):
assert len(X) == X.size
X = np.array(X).reshape(-1,)
return pd.Series(X).map(self.mapper_).values
def encode_column(
df:pd.DataFrame,
col_name:str,
encoding:str
)->tuple:
"""encode a column in a dataframe according the encoding type"""
if encoding == "ohe":
return ohe_column(df, col_name)
elif encoding == "le":
return le_column(df, col_name)
else:
raise ValueError
def ohe_column(df:pd.DataFrame, col_name:str)->tuple:
"""one hot encode a column in datatrame"""
assert isinstance(col_name, str)
assert isinstance(df, pd.DataFrame)
encoder = OneHotEncoder()
ohe_cat = encoder.fit_transform(df[col_name].values.reshape(-1, 1))
cols_added = [f"{col_name}_{i}" for i in range(ohe_cat.shape[-1])]
df[cols_added] = ohe_cat
df.pop(col_name)
return df, cols_added, encoder
def le_column(df:pd.DataFrame, col_name:str)->tuple:
"""label encode a column in dataframe"""
encoder = LabelEncoder()
index = df.columns.to_list().index(col_name)
encoded = encoder.fit_transform(df[col_name])
df.pop(col_name)
df.insert(index, col_name, encoded)
return df, None, encoder | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/utils.py | utils.py |
import json
import xml.etree.ElementTree as ET
import re
import io
from ai4water.backend import os, pd, requests
class PanParam:
""" PANGAEA Parameter
Shoud be used to create PANGAEA parameter objects. Parameter is used here to represent 'measured variables'
Attributes
----------
ID : int
the identifier for the parameter
name : str
A long name or title used for the parameter
shortName : str
A short name or label to identify the parameter
synonym : dict
        A dictionary of synonyms for the parameter which e.g. is used by other archives or communities.
The dict key indicates the namespace (possible values currently are CF and OS)
type : str
indicates the data type of the parameter (string, numeric, datetime etc..)
source : str
defines the category or source for a parameter (e.g. geocode, data, event)... very PANGAEA specific ;)
unit : str
the unit of measurement used with this parameter (e.g. m/s, kg etc..)
format: str
the number format string given by PANGAEA e.g ##.000 which defines the displayed precision of the number
"""
def __init__(self, ID, name, shortName, param_type, source, unit=None, fmt=None):
self.ID = ID
self.name = name
self.shortName = shortName
# Synonym namespace dict predefined keys are CF: CF variables (), OS:OceanSites, SD:SeaDataNet abbreviations (TEMP, PSAL etc..)
ns = ('CF', 'OS', 'SD')
self.synonym = dict.fromkeys(ns)
self.type = param_type
self.source = source
self.unit = unit
self.format = fmt
class PanDataSet:
""" PANGAEA DataSet
The PANGAEA PanDataSet class enables the creation of objects which hold the necessary information, including data as well as metadata, to analyse a given PANGAEA dataset.
Parameters
----------
ID : str
The identifier of a PANGAEA dataset. An integer number or a DOI is accepted here
deleteFlag : str
        in case quality flags are available, this parameter defines a flag for which data should not be included in the data DataFrame.
Possible values are listed here: https://wiki.pangaea.de/wiki/Quality_flag
addQC : boolean
adds a QC column for each parameter which contains QC flags
Attributes
----------
ID : str
The identifier of a PANGAEA dataset. An integer number or a DOI is accepted here
params : list of PanParam
a list of all PanParam objects (the parameters) used in this dataset
events : list of PanEvent
a list of all PanEvent objects (the events) used in this dataset
projects : list of PanProject
a list containing the PanProjects objects referenced by this dataset
data : pandas.DataFrame
a pandas dataframe holding all the data
loginstatus : str
        a label which indicates whether the data set is protected or not. Default value: 'unrestricted'
"""
def __init__(self, ID=None, paramlist=None, deleteFlag='', addQC=False):
### The constructor allows the initialisation of a PANGAEA dataset object either by using an integer dataset ID or a DOI
self.ID = setID(ID)
self.ns = {'md': 'http://www.pangaea.de/MetaData'}
# Mapping should be moved to e.g netCDF class/module??
# moddir = os.path.dirname(os.path.abspath(__file__))
# self.CFmapping=pd.read_csv(moddir+'\\PANGAEA_CF_mapping.txt',delimiter='\t',index_col='ID')
self.params = dict()
self.defaultparams = ['Latitude', 'Longitude', 'Event', 'Elevation', 'Date/Time']
self.paramlist = paramlist
self.paramlist_index = []
self.events = []
self.projects = []
# allowed geocodes for netcdf generation which are used as xarray dimensions not needed in the moment
self._geocodes = {1599: 'Date_Time', 1600: 'Latitude', 1601: 'Longitude', 1619: 'Depth water'}
self.loginstatus = 'unrestricted'
self.deleteFlag = deleteFlag
self.metadata = {}
# print('trying to load data and metadata from PANGAEA')
self.set_metadata()
self.defaultparams = [s for s in self.defaultparams if s in self.params.keys()]
self.addQC = addQC
if self.paramlist is not None:
if len(self.paramlist) != len(self.paramlist_index):
                raise ValueError("length of paramlist does not match the number of resolved parameter indices")
def _setParameters(self, panXMLMatrixColumn):
"""
Initializes the list of parameter objects from the metadata XML info
"""
coln = dict()
if panXMLMatrixColumn is not None:
for matrix in panXMLMatrixColumn:
paramstr = matrix.find("md:parameter", self.ns)
panparID = int(_getID(str(paramstr.get('id'))))
panparShortName = ''
if paramstr.find('md:shortName', self.ns) is not None:
panparShortName = paramstr.find('md:shortName', self.ns).text
# Rename duplicate column headers
if panparShortName in coln:
coln[panparShortName] += 1
panparShortName = panparShortName + '_' + str(coln[panparShortName])
else:
coln[panparShortName] = 1
panparType = matrix.get('type')
panparUnit = None
if paramstr.find('md:unit', self.ns) is not None:
panparUnit = paramstr.find('md:unit', self.ns).text
panparFormat = matrix.get('format')
self.params[panparShortName] = PanParam(panparID, paramstr.find('md:name', self.ns).text,
panparShortName, panparType, matrix.get('source'), panparUnit,
panparFormat)
def get_data(self, addQC=False):
"""
This method populates the data DataFrame with data from a PANGAEA dataset.
        The data is parsed from the tabular ASCII file delivered by PANGAEA.
Parameters:
-----------
addQC : boolean
If this is set to True, pangaeapy adds a QC column in which the quality flags are separated.
            Each new column is named after the original column plus a "_qc" suffix.
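        Examples
        --------
        a minimal sketch (requires internet access to PANGAEA):
        >>> ds = PanDataSet('10.1594/PANGAEA.919103')
        >>> df = ds.get_data()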
"""
if self.metadata['hierarchyLevel'] is not None:
if self.metadata['hierarchyLevel'].get('value') == 'parent':
raise ValueError(f"""
Data set is of type parent, please select one of its child datasets.
The {len(self.children())} child datasets are \n{self.children()}""")
        # converting list of parameters' short names (from user input) to the list of parameters' indexes
        # the list of parameters' indexes is an argument for pd.read_csv
if self.paramlist is not None:
self.paramlist += self.defaultparams
for parameter in self.paramlist:
_iter = 0
for shortName in self.params.keys():
if parameter == shortName:
self.paramlist_index.append(_iter)
_iter += 1
if len(self.paramlist) != len(self.paramlist_index):
raise ValueError("Error entering parameters`short names!")
else:
self.paramlist_index = None
dataURL = "https://doi.pangaea.de/10.1594/PANGAEA." + str(self.ID) + "?format=textfile"
panDataTxt = requests.get(dataURL).text
panData = re.sub(r"/\*(.*)\*/", "", panDataTxt, 1, re.DOTALL).strip()
# Read in PANGAEA Data
data = pd.read_csv(io.StringIO(panData), index_col=False, error_bad_lines=False, sep=u'\t',
usecols=self.paramlist_index, names=list(self.params.keys()), skiprows=[0])
# add geocode/dimension columns from Event
# -- delete values with given QC flags
if self.deleteFlag != '':
if self.deleteFlag == '?' or self.deleteFlag == '*':
self.deleteFlag = "\\" + self.deleteFlag
data.replace(regex=r'^' + self.deleteFlag + '{1}.*', value='', inplace=True)
# --- Replace Quality Flags for numeric columns
if not addQC:
data.replace(regex=r'^[\?/\*#\<\>]', value='', inplace=True)
# --- Delete empty columns
data = data.dropna(axis=1, how='all')
for paramcolumn in list(self.params.keys()):
if paramcolumn not in data.columns:
del self.params[paramcolumn]
# --- add QC columns
elif addQC:
if self.params[paramcolumn].type == 'numeric':
data[[paramcolumn + '_qc', paramcolumn]] = data[paramcolumn].astype(str).str.extract(
r'(^[\*/\?])?(.+)')
# --- Adjust Column Data Types
data = data.apply(pd.to_numeric, errors='ignore')
if 'Date/Time' in data.columns:
data.index = pd.to_datetime(data['Date/Time'], format='%Y/%m/%dT%H:%M:%S')
data.index.names = ['index']
data.pop('Date/Time')
return data
def download(self, path, name=None, **kwargs):
if name is None:
name = self.metadata['title'].replace(' ', '_')
name = name.replace('-', '_')
path = os.path.join(path, name+'.txt')
self.get_data().to_csv(path, **kwargs)
if 'hierarchyLevel' in self.metadata and self.metadata['hierarchyLevel'] is not None:
self.metadata['hierarchyLevel'] = self.metadata['hierarchyLevel'].text
fname = os.path.join(os.path.dirname(path), f'{name}_metadata.json')
with open(fname, 'w') as fp:
json.dump(self.metadata, fp, indent=4, sort_keys=False)
return name
def set_metadata(self):
"""
The method initializes the metadata of the PanDataSet object using the information of a PANGAEA metadata XML file.
"""
_metadata = {}
metaDataURL = "https://doi.pangaea.de/10.1594/PANGAEA." + str(self.ID) + "?format=metainfo_xml"
r = requests.get(metaDataURL)
if r.status_code != 404:
try:
r.raise_for_status()
xmlText = r.text
xml = ET.fromstring(xmlText)
self.metadata['loginstatus'] = xml.find('./md:technicalInfo/md:entry[@key="loginOption"]', self.ns).get('value')
if self.metadata['loginstatus'] != 'unrestricted':
raise ValueError('Data set is protected')
self.metadata['hierarchyLevel'] = xml.find('./md:technicalInfo/md:entry[@key="hierarchyLevel"]', self.ns)
self.metadata['title'] = xml.find("./md:citation/md:title", self.ns).text
self.metadata['year'] = xml.find("./md:citation/md:year", self.ns).text
self.metadata['author_info'] = self.find_author_info(xml)
self.metadata['project_info'] = self.find_project_info(xml)
self.metadata['license_info'] = self.find_license_info(xml)
topotypeEl = xml.find("./md:extent/md:topoType", self.ns)
if topotypeEl is not None:
self.topotype = topotypeEl.text
else:
self.topotype = None
panXMLMatrixColumn = xml.findall("./md:matrixColumn", self.ns)
self._setParameters(panXMLMatrixColumn)
except requests.exceptions.HTTPError as e:
print(e)
def children(self):
"""Finds the child datasets of a parent dataset"""
kinder = []
childqueryURL = "https://www.pangaea.de/advanced/search.php?q=incollection:" + str(self.ID) + "&count=1000"
r = requests.get(childqueryURL)
if r.status_code != 404:
s = r.json()
for p in s['results']:
kinder.append(p['URI'])
return kinder
def find_license_info(self, xml)->dict:
lizenz = {}
idx = 0
for _license in xml.findall("./md:license", self.ns):
l = _license.find("md:label", self.ns)
lizenz[f'label_{idx}'] = l.text if l is not None else l
n = _license.find("md:name", self.ns)
lizenz[f'name_{idx}'] = n.text if n is not None else n
u = _license.find("md:URI", self.ns)
lizenz[f'URI_{idx}'] = u.text if u is not None else u
idx += 1
return lizenz
def find_project_info(self, xml)->dict:
projekt_info = {}
idx = 0
for project in xml.findall("./md:project", self.ns):
l = project.find("md:label", self.ns)
projekt_info[f'label_{idx}'] = l.text if l is not None else l
n = project.find("md:name", self.ns)
            projekt_info[f'name_{idx}'] = n.text if n is not None else n
u = project.find("md:URI", self.ns)
projekt_info[f'URI_{idx}'] = u.text if u is not None else u
uri = project.find("md:award/md:URI", self.ns)
projekt_info[f'awardURI_{idx}'] = uri.text if uri is not None else uri
idx += 1
return projekt_info
def find_author_info(self, xml)->dict:
autor = {}
idx = 0
for author in xml.findall("./md:citation/md:author", self.ns):
autor[f'lastname_{idx}'] = author.find("md:lastName", self.ns).text
autor[f'firstname_{idx}'] = author.find("md:firstName", self.ns).text
orcid = author.find("md:orcid", self.ns)
autor[f'orcid_{idx}'] = orcid.text if orcid is not None else orcid
idx += 1
return autor
def setID(ID):
"""
Initialize the ID of a data set in case it was not defined in the constructur
Parameters
----------
ID : str
The identifier of a PANGAEA dataset. An integer number or a DOI is accepted here
"""
idmatch = re.search(r'10\.1594\/PANGAEA\.([0-9]+)$', ID)
if idmatch is not None:
return idmatch[1]
else:
return ID
def _getID(panparidstr):
panparidstr = panparidstr[panparidstr.rfind('.') + 1:]
panparId = re.match(r"([a-z]+)([0-9]+)", panparidstr)
if panparId:
return panparId.group(2)
else:
return False
if __name__ == "__main__":
# ds = PanDataSet('10.1594/PANGAEA.898217')
# print(ds.data.shape)
# ds = PanDataSet('10.1594/PANGAEA.882613')
# print(ds.data.shape)
# ds = PanDataSet('10.1594/PANGAEA.879494')
# print(ds.data.shape)
# ds = PanDataSet('10.1594/PANGAEA.831196')
# print(ds.data.shape)
ds = PanDataSet('10.1594/PANGAEA.919103')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.919104')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.909880')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.908290')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.892384')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.883587')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.882178')
print(ds.get_data().shape)
# ds = PanDataSet('10.1594/PANGAEA.811992')
# print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.807883')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.778629')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.774595')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.746240')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.226925')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.905446')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.900958')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.890070')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.882611')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.879507')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.842446')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.841977')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.831193')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.811072')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.811076')
print(ds.get_data().shape)
ds = PanDataSet('10.1594/PANGAEA.912582')
print(ds.get_data().shape) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/download_pangaea.py | download_pangaea.py |
__all__ = ["Swatch"]
from typing import Union
from ai4water.backend import pd, os, np
from ai4water.datasets._datasets import Datasets
class Swatch(Datasets):
"""
The Surface Water Chemistry (SWatCh) database as introduced in Franz_ and Lobke, 2022.
.. _Franz:
https://essd.copernicus.org/preprints/essd-2021-43/
"""
url = "https://zenodo.org/record/6484939"
def __init__(self,
remove_csv_after_download=False,
path=None,
**kwargs):
"""
parameters
----------
remove_csv_after_download : bool (default=False)
if True, the csv will be removed after downloading and processing.
"""
super().__init__(path=path, **kwargs)
self.ds_dir = path
self._download(tolerate_error=True)
self._maybe_to_binary()
if remove_csv_after_download:
if os.path.exists(self.csv_name):
os.remove(self.csv_name)
@property
def parameters(self)->list:
"""list of water quality parameters available"""
return list(self.names.values())
@property
def sites(self)->list:
"""list of site names"""
all_sites = np.load(os.path.join(self.ds_dir, 'loc_id.npy'), allow_pickle=True)
# numpy's unique is much slower
return list(np.sort(pd.unique(all_sites)))
@property
def site_names(self)->list:
"""list of site names"""
all_sites = np.load(os.path.join(self.ds_dir, 'location.npy'), allow_pickle=True)
# numpy's unique is much slower
return list(np.sort(pd.unique(all_sites)))
@property
def csv_name(self)->str:
return os.path.join(self.ds_dir, "SWatCh_v2.csv")
@property
def npy_files(self)->list:
return [fname for f in os.walk(self.ds_dir) for fname in f[2] if fname.endswith('.npy')]
def _maybe_to_binary(self):
"""reads the csv file and saves each columns in binary format using numpy.
The csv file is 1.5 GB which takes lot of time for loading most the columns
are not required most of the times.
"""
if len(self.npy_files) == 28:
return
df = pd.read_csv(self.csv_name)
h = {col: "category" for col in cats}
dates = pd.to_datetime(df.pop("ActivityStartDate") + " " + df.pop("ActivityStartTime"))
df.index = dates
maybe_reduce_memory(df, hints=h)
strings = ["ResultComment", "ResultAnalyticalMethodID", "MonitoringLocationID",
"MonitoringLocationName"]
for col in strings:
df[col] = df[col].astype(str)
df.rename(columns=self.names, inplace=True)
for col in df.columns:
np.save(os.path.join(self.ds_dir, col), df[col].values)
np.save(os.path.join(self.ds_dir, "index"), df.index)
return
def _load_as_df(self, parameters)->pd.DataFrame:
paras = []
for para in parameters:
paras.append(np.load(os.path.join(self.ds_dir, f"{para}.npy"), allow_pickle=True))
index = np.load(os.path.join(self.ds_dir, "index.npy"), allow_pickle=True)
return pd.DataFrame(np.column_stack(paras),
columns=parameters,
index=pd.to_datetime(index))
def fetch(
self,
parameters: Union[list, str] = None,
station_id: Union[list, str] = None,
station_names: Union[list, str] = None,
)->pd.DataFrame:
"""
parameters
----------
parameters : str/list (default=None)
Names of parameters to fetch. By default, ``name``, ``value``, ``val_unit``, ``location``,
``lat``, and ``long`` are read.
station_id : str/list (default=None)
name/names of station id for which the data is to be fetched.
By default, the data for all stations is fetched. If given, then
``station_names`` should not be given.
station_names : str/list (default=None)
name/names of station id for which the data is to be fetched.
By default, the data for all stations is fetched. If given, then ``station_id``
should not be given.
Returns
-------
pd.DataFrame
Examples
--------
>>> from ai4water.datasets import Swatch
>>> ds = Swatch()
>>> df = ds.fetch()
"""
def_paras = ["name", "value", "val_unit", "lat", "long"]
        if station_id is not None and station_names is not None:
            raise ValueError("Either station_id or station_names should be given, not both.")
        stations = station_id if station_id is not None else station_names
        if station_id is not None:
            loc = "loc_id"
        else:
            loc = "location"
        def_paras.append(loc)
        if parameters is None:
            parameters = def_paras
        if isinstance(parameters, str):
            parameters = [parameters]
        assert isinstance(parameters, list)
        if stations is not None and loc not in parameters:
            parameters = parameters + [loc]
        df = self._load_as_df(parameters)
        # the docstring promises that data can be fetched for selected stations;
        # subset the rows here if station_id/station_names were given
        if stations is not None:
            if not isinstance(stations, list):
                stations = [stations]
            df = df[df[loc].isin(stations)]
        return df
def num_samples(
self,
parameter,
station_id = None,
)->int:
"""
parameters
----------
parameter : str
name of the water quality parameter whose samples are to be quantified.
station_id :
if given, samples of parameter will be returned for only this site/sites
otherwise for all sites
"""
raise NotImplementedError
@property
def names(self)->dict:
"""tells the names of parameters in this class and their original names
in SWatCh dataset in the form of a python dictionary
"""
return {
"LaboratoryName": "lab_name",
'ActivityDepthHeightMeasure': "depth_height",
'ActivityDepthHeightUnit': "depth_height_unit",
"ActivityMediaName": "act_name",
"ActivityType": "ActivityType",
"MonitoringLocationHorizontalCoordinateReferenceSystem": "coord_system",
'MonitoringLocationLongitude': "long",
'MonitoringLocationLatitude': "lat",
"CharacteristicName": "name",
"ResultValue": "value",
"ResultValueType": "val_type",
"MonitoringLocationName": "location",
"MonitoringLocationID": "loc_id",
"MonitoringLocationType": "loc_type",
"ResultDetectionQuantitationLimitType": "detect_limit",
"ResultDetectionQuantitationLimitUnit": "detect_limit_type",
"ResultDetectionQuantitationLimitMeasure": "detect_limit_measure",
"ResultDetectionCondition": "detect_cond",
"ResultAnalyticalMethodName": "method_name",
"ResultAnalyticalMethodContext": "method_context",
"ResultAnalyticalMethodID": "method_id",
"ResultUnit": "val_unit",
}
cats = ['ActivityDepthHeightUnit', 'ActivityMediaName', 'ActivityType', 'CharacteristicName',
'DatasetName', 'LaboratoryName', 'MethodSpeciation', 'MonitoringLocationHorizontalCoordinateReferenceSystem',
'MonitoringLocationType', 'ResultAnalyticalMethodContext', 'ResultDetectionCondition',
'ResultDetectionQuantitationLimitType', 'ResultDetectionQuantitationLimitUnit',
'ResultSampleFraction', 'ResultStatusID', 'ResultUnit', 'ResultValueType'
]
def int8(array:Union[np.ndarray, pd.Series])->bool:
return array.min() > np.iinfo(np.int8).min and array.max() < np.iinfo(np.int8).max
def int16(array:Union[np.ndarray, pd.Series])->bool:
return array.min() > np.iinfo(np.int16).min and array.max() < np.iinfo(np.int16).max
def int32(array:Union[np.ndarray, pd.Series])->bool:
return array.min() > np.iinfo(np.int32).min and array.max() < np.iinfo(np.int32).max
def int64(array:Union[np.ndarray, pd.Series])->bool:
return array.min() > np.iinfo(np.int64).min and array.max() < np.iinfo(np.int64).max
def float16(array:Union[np.ndarray, pd.Series])->bool:
return array.min() > np.finfo(np.float16).min and array.max() < np.finfo(np.float16).max
def float32(array:Union[np.ndarray, pd.Series])->bool:
return array.min() > np.finfo(np.float32).min and array.max() < np.finfo(np.float32).max
def maybe_convert_int(series:pd.Series)->pd.Series:
if int8(series):
return series.astype(np.int8)
if int16(series):
return series.astype(np.int16)
if int32(series):
return series.astype(np.int32)
if int64(series):
return series.astype(np.int64)
return series
def maybe_convert_float(series:pd.Series)->pd.Series:
if float16(series):
return series.astype(np.float16)
if float32(series):
return series.astype(np.float32)
return series
def memory_usage(dataframe):
return round(dataframe.memory_usage().sum() / 1024**2, 4)
def maybe_reduce_memory(dataframe:pd.DataFrame, hints=None)->pd.DataFrame:
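    """Attempts to reduce the memory footprint of ``dataframe`` by downcasting
    numeric columns to smaller dtypes and converting hinted columns to
    ``category``. ``hints`` maps column names to dtype names and overrides the
    inferred dtypes."""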
init_memory = memory_usage(dataframe)
_hints = {col:dataframe[col].dtype.name for col in dataframe.columns}
if hints:
_hints.update(hints)
for col in dataframe.columns:
col_dtype = dataframe[col].dtype.name
if 'int' in _hints[col]:
dataframe[col] = maybe_convert_int(dataframe[col])
elif 'float' in _hints[col]:
dataframe[col] = maybe_convert_float(dataframe[col])
elif 'int' in col_dtype:
dataframe[col] = maybe_convert_int(dataframe[col])
elif 'float' in col_dtype or 'float' in _hints[col]:
dataframe[col] = maybe_convert_float(dataframe[col])
elif col_dtype in ['object'] and 'cat' in _hints[col]:
dataframe[col] = dataframe[col].astype('category')
print(f"memory reduced from {init_memory} to {memory_usage(dataframe)}")
return dataframe | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/_swatch.py | _swatch.py |
from typing import Union
from ai4water.backend import pd, os
from .camels import Camels
from .camels import CAMELS_AUS
from .camels import CAMELS_CL
from .camels import CAMELS_BR
from .camels import CAMELS_GB
from .camels import CAMELS_US
from .camels import LamaH
from .camels import HYSETS
from .camels import HYPE
from .camels import WaterBenchIowa
from ._datasets import Weisssee
from ._datasets import WaterChemEcuador
from ._datasets import WaterChemVictoriaLakes
from ._datasets import WeatherJena
from ._datasets import WQCantareira
from ._datasets import WQJordan
from ._datasets import FlowSamoylov
from ._datasets import FlowSedDenmark
from ._datasets import StreamTempSpain
from ._datasets import RiverTempEroo
from ._datasets import HoloceneTemp
from ._datasets import FlowTetRiver
from ._datasets import SedimentAmersee
from ._datasets import HydrocarbonsGabes
from ._datasets import HydroChemJava
from ._datasets import PrecipBerlin
from ._datasets import GeoChemMatane
from ._datasets import WQJordan2
from ._datasets import YamaguchiClimateJp
from ._datasets import FlowBenin
from ._datasets import HydrometricParana
from ._datasets import RiverTempSpain
from ._datasets import RiverIsotope
from ._datasets import EtpPcpSamoylov
from ._datasets import SWECanada
from ._datasets import gw_punjab
from ._datasets import RRAlpineCatchments
from ._datasets import RRLuleaSweden
from ._datasets import mg_photodegradation
from ._datasets import qe_biochar_ec
from .mtropics import MtropicsLaos
from .mtropics import MtropcsThailand
from .mtropics import MtropicsVietnam
from .mtropics import ecoli_mekong_laos
from .mtropics import ecoli_houay_pano
from .mtropics import ecoli_mekong_2016
from .mtropics import ecoli_mekong
from ._quadica import Quadica
from ._grqa import GRQA
from ._swatch import Swatch
from ._rc4uscoast import RC4USCoast
from ._grimedb import GRiMeDB
from ._npctr import NPCTRCatchments
from ._hyperspectral import SoilPhosphorus
from .water_quality import DoceRiver
from .water_quality import SeluneRiver
from .water_quality import busan_beach
from .water_quality import RiverChemSiberia
def load_nasdaq(inputs: Union[str, list, None] = None, target: str = 'NDX'):
"""Loads Nasdaq100 by downloading it if it is not already downloaded."""
fname = os.path.join(os.path.dirname(__file__), "nasdaq100_padding.csv")
if not os.path.exists(fname):
print(f"downloading file to {fname}")
df = pd.read_csv("https://raw.githubusercontent.com/KurochkinAlexey/DA-RNN/master/nasdaq100_padding.csv")
df.to_csv(fname)
df = pd.read_csv(fname)
in_cols = list(df.columns)
in_cols.remove(target)
if inputs is None:
inputs = in_cols
target = [target]
return df[inputs + target]
all_datasets = ['CAMELS_AUS', 'CAMELS_CL', 'CAMELS_US', 'CAMELS_GB', 'CAMELS_BR',
                'LamaH', 'HYPE', 'HYSETS']
from typing import Union
from ai4water.backend import pd, os
def busan_beach(
inputs: list = None,
target: Union[list, str] = 'tetx_coppml'
) -> pd.DataFrame:
"""
Loads the Antibiotic resitance genes (ARG) data from a recreational beach
in Busan, South Korea along with environment variables.
The data is in the form of
mutlivariate time series and was collected over the period of 2 years during
several precipitation events. The frequency of environmental data is 30 mins
while that of ARG is discontinuous. The data and its pre-processing is described
in detail in `Jang et al., 2021 <https://doi.org/10.1016/j.watres.2021.117001>`_
Arguments
---------
inputs :
features to use as input. By default all environmental data
is used which consists of following parameters
- tide_cm
- wat_temp_c
- sal_psu
- air_temp_c
- pcp_mm
- pcp3_mm
- pcp6_mm
- pcp12_mm
- wind_dir_deg
- wind_speed_mps
- air_p_hpa
- mslp_hpa
- rel_hum
target :
feature/features to use as target/output. By default
`tetx_coppml` is used as target.
Logically one or more from following can be considered as target
- ecoli
- 16s
- inti1
- Total_args
- tetx_coppml
- sul1_coppml
- blaTEM_coppml
- aac_coppml
- Total_otus
- otu_5575
- otu_273
- otu_94
Returns
-------
pd.DataFrame
a pandas dataframe with inputs and target and indexed
with pandas.DateTimeIndex
Examples
--------
>>> from ai4water.datasets import busan_beach
>>> dataframe = busan_beach()
>>> dataframe.shape
(1446, 14)
>>> dataframe = busan_beach(target=['tetx_coppml', 'sul1_coppml'])
>>> dataframe.shape
(1446, 15)
"""
fpath = os.path.join(os.path.dirname(os.path.dirname(__file__)), "arg_busan.csv")
df = pd.read_csv(fpath, index_col="index")
df.index = pd.to_datetime(df.index)
default_inputs = ['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm', 'pcp3_mm', 'pcp6_mm',
'pcp12_mm', 'wind_dir_deg', 'wind_speed_mps', 'air_p_hpa', 'mslp_hpa', 'rel_hum'
]
default_targets = [col for col in df.columns if col not in default_inputs]
if inputs is None:
inputs = default_inputs
    if isinstance(target, str):
        target = [target]
    elif target is None:
        target = default_targets
assert isinstance(target, list)
df = df[inputs + target]
return df | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/datasets/water_quality/_busan_beach.py | _busan_beach.py |
__all__ = ["Plot", "murphy_diagram", "edf_plot", "fdc_plot"]
from typing import Union, Callable
from ai4water.backend import os, np, pd, plt, plotly
from ai4water.backend import easy_mpl as em
from .plotting_tools import save_or_show, to_1d_array
class Plot(object):
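    """Base class for plotting helpers which stores the path where figures are
    saved, the plotting backend ('plotly' or 'matplotlib'), the dpi and whether
    figures are saved by default."""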
def __init__(self, path=None, backend='plotly', save=True, dpi=300):
self.path = path
self.backend = backend
self.save = save
self.dpi = dpi
@property
def backend(self):
return self._backend
@backend.setter
def backend(self, x):
_backend = x
assert x in ['plotly', 'matplotlib'], f"unknown backend {x}. Allowed values are `plotly` and `matplotlib`"
if x == 'plotly':
if plotly is None:
_backend = 'matplotlib'
self._backend = _backend
@property
def path(self):
return self._path
@path.setter
def path(self, x):
if x is None:
x = os.getcwd()
self._path = x
def save_or_show(self, save: bool = None, fname=None, where='', dpi=None,
bbox_inches='tight',
close=True, show=False):
if save is None:
save = self.save
if dpi is None:
dpi = self.dpi
return save_or_show(self.path, save, fname, where, dpi, bbox_inches, close,
show=show)
def linear_model(
model_name: str,
inputs,
target
):
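    """Fits the scikit-learn linear model named by ``model_name`` on
    ``inputs`` and ``target`` and returns its in-sample predictions. It is
    used to build a reference forecast for :func:`murphy_diagram`."""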
import sklearn
from ai4water.backend import get_attributes
models = get_attributes(sklearn, "linear_model", case_sensitive=True)
if model_name not in models:
raise ValueError(f"Can not find {model_name} in sklearn.linear_model")
model = models[model_name]
reg = model().fit(inputs, target)
return reg.predict(inputs)
def murphy_diagram(
observed: Union[list, np.ndarray, pd.Series, pd.DataFrame],
predicted: Union[list, np.ndarray, pd.Series, pd.DataFrame],
reference: Union[list, np.ndarray, pd.Series, pd.DataFrame] = None,
reference_model: Union[str, Callable] = None,
inputs=None,
plot_type: str = "scores",
xaxis: str = "theta",
ax: plt.Axes = None,
line_colors: tuple = None,
fill_color: str = "lightgray",
show: bool = True
) -> plt.Axes:
"""Murphy diagram as introducted by Ehm_ et al., 2015
and illustrated by Rob Hyndman_
Arguments:
observed:
observed or true values
predicted:
model's prediction
reference:
reference prediction
reference_model:
The model for reference prediction. Only relevent if `reference` is
None and `plot_type` is `diff`. It can be callable or a string. If it is a
string, then it can be any model name from sklearn.linear_model_
inputs:
inputs for reference model. Only relevent if `reference_model` is not
None and `plot_type` is `diff`
plot_type:
either of `scores` or `diff`
xaxis:
either of `theta` or `time`
ax:
the axis to use for plotting
line_colors:
colors of line
fill_color:
color to fill confidence interval
show:
whether to show the plot or not
Returns:
matplotlib axes
Example:
>>> import numpy as np
>>> from ai4water.utils.visualizations import murphy_diagram
>>> yy = np.random.randint(1, 1000, 100)
>>> ff1 = np.random.randint(1, 1000, 100)
>>> ff2 = np.random.randint(1, 1000, 100)
>>> murphy_diagram(yy, ff1, ff2)
...
>>> murphy_diagram(yy, ff1, ff2, plot_type="diff")
.. _Ehm:
https://arxiv.org/pdf/1503.08195.pdf
.. _Hyndman:
https://robjhyndman.com/hyndsight/murphy-diagrams/
.. _sklearn.linear_model:
https://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model
"""
assert plot_type in ("scores", "diff")
assert xaxis in ("theta", "time")
y = to_1d_array(observed)
f1 = to_1d_array(predicted)
if reference is None:
if plot_type == "diff":
assert reference_model is not None
if callable(reference_model):
reference = reference_model(inputs)
else:
assert inputs is not None, f"You must specify the inputs for {reference_model}"
reference = linear_model(reference_model, inputs, predicted)
f2 = to_1d_array(reference)
        else:
            raise ValueError("either `reference` or `reference_model` must be "
                             "provided because murphy_diagram always needs a "
                             "second (reference) forecast")
else:
f2 = to_1d_array(reference)
line_colors = line_colors or ["dimgrey", "tab:orange"]
n = len(y)
_min, _max = np.nanmin(np.hstack([y, f1, f2])), np.nanmax(np.hstack([y, f1, f2]))
tmp = _min - 0.2 * (_max - _min), _max + 0.2 * (_max - _min)
theta = np.linspace(tmp[0], tmp[1], 501)
s1 = np.full((501, n), np.nan)
s2 = np.full((501, n), np.nan)
max1 = np.maximum(f1, y)
max2 = np.maximum(f2, y)
min1 = np.minimum(f1, y)
min2 = np.minimum(f2, y)
for j in range(n):
s1[:, j] = abs(y[j] - theta) * (max1[j] > theta) * (min1[j] <= theta)
s2[:, j] = abs(y[j] - theta) * (max2[j] > theta) * (min2[j] <= theta)
# grab the axes
if ax is None:
ax = plt.gca()
if xaxis == "theta":
s1ave, s2ave = _data_for_theta(s1, s2)
else:
raise NotImplementedError
if plot_type == "scores":
_plot_scores(theta, s1ave, s2ave, ax, line_colors)
ax.set_ylabel("Empirical Scores", fontsize=16)
else:
_plot_diff(theta, s1, s2, n, ax, line_colors[0], fill_color)
ax.set_ylabel("Difference in scores", fontsize=16)
ax.set_xlabel(xaxis, fontsize=16)
ax.set_title("Murphy Diagram", fontsize=16)
if show:
plt.show()
return ax
def last_nonzero(arr, axis, invalid_val=-1):
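# e.g. for arr = np.array([0, 3, 0, 5, 0]) and axis=0 this returns 3, i.e. the index
# of the last non-zero entry; `invalid_val` is returned when no non-zero entry exists.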
mask = arr != 0
val = arr.shape[axis] - np.flip(mask, axis=axis).argmax(axis=axis) - 1
return np.where(mask.any(axis=axis), val, invalid_val)
def _plot_diff(theta, s1, s2, n, ax, line_color="black", fill_color="lightgray"):
se = np.std(s1 - s2) / np.sqrt(n)
diff = np.mean(s1 - s2, axis=1)
upper = diff + 1.96 * se
lower = diff - 1.96 * se
ax.plot(theta, diff, color=line_color)
# index of first non-zero occurrence
st = (diff != 0).argmax(axis=0)
en = last_nonzero(diff, axis=0).item()
ax.fill_between(theta[st:en], upper[st:en], lower[st:en], # alpha=0.2,
color=fill_color)
return ax
def fdc_plot(
sim: Union[list, np.ndarray, pd.Series, pd.DataFrame],
obs: Union[list, np.ndarray, pd.Series, pd.DataFrame],
ax: plt.Axes = None,
legend: bool = True,
xlabel: str = "Exceedence [%]",
ylabel: str = "Flow",
show: bool = True
) -> plt.Axes:
"""Plots flow duration curve
Arguments:
sim:
simulated flow
obs:
observed flow
ax:
axis on which to plot
legend:
whether to apply legend or not
xlabel:
label to set on x-axis. set to None for no x-label
ylabel:
label to set on y-axis
show:
whether to show the plot or not
Returns:
matplotlib axes
Example:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from ai4water.utils.visualizations import fdc_plot
>>> simulated = np.random.random(100)
>>> observed = np.random.random(100)
>>> fdc_plot(simulated, observed)
>>> plt.show()
"""
sim = to_1d_array(sim)
obs = to_1d_array(obs)
sort_obs = np.sort(obs)[::-1]
exceedence_obs = np.arange(1., len(sort_obs) + 1) / len(sort_obs)
sort_sim = np.sort(sim)[::-1]
exceedence_sim = np.arange(1., len(sort_sim) + 1) / len(sort_sim)
if ax is None:
ax = plt.gca()
ax.plot(exceedence_obs * 100, sort_obs, color='b', label="Observed")
ax.plot(exceedence_sim * 100, sort_sim, color='r', label="Simulated")
if legend:
ax.legend()
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if show:
plt.show()
return ax
def _plot_scores(theta, s1ave, s2ave, ax, line_colors):
ax.plot(theta, s1ave, color=line_colors[0])
ax.plot(theta, s2ave, color=line_colors[1])
return ax
def _data_for_time(s1, s2):
s1ave, s2ave = np.mean(s1, axis=0), np.mean(s2, axis=0)
return s1ave, s2ave
def _data_for_theta(s1, s2):
return np.mean(s1, axis=1), np.mean(s2, axis=1)
def init_subplots(width=None, height=None, nrows=1, ncols=1, **kwargs):
"""Initializes the fig for subplots"""
plt.close('all')
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, **kwargs)
if width is not None:
fig.set_figwidth(width)
if height is not None:
fig.set_figheight(height)
return fig, ax
def edf_plot(
y: np.ndarray,
num_points: int = 100,
xlabel="Objective Value",
marker: str = '-',
ax: plt.Axes = None,
show:bool = True,
**kwargs
) -> plt.Axes:
"""
Plots the empirical distribution function.
Parameters
----------
y : np.ndarray
array of values
num_points : int
number of points at which the empirical distribution is evaluated
xlabel : str
label for the x-axis
marker : str
marker/line style used for plotting
ax : plt.Axes, optional
matplotlib axes to plot on; a new one is created if not given
show : bool, optional (default=True)
whether to show the plot or not
**kwargs :
keyword arguments for plot
Returns
-------
plt.Axes
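Examples
--------
A minimal sketch with synthetic values:
>>> import numpy as np
>>> from ai4water.utils.visualizations import edf_plot
>>> edf_plot(np.random.random(50), show=False)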
"""
x = np.linspace(np.min(y), np.max(y), num_points)
y_values = np.sum(y[:, np.newaxis] <= x, axis=0) / y.size
y_values = y_values.reshape(-1, )
if ax is None:
_, ax = plt.subplots()
ax.grid()
ax_kws = dict(title="Empirical Distribution Function Plot",
ylabel="Cumulative Probability",
xlabel=xlabel)
if 'ax_kws' in kwargs:
ax_kws.update(kwargs.pop('ax_kws'))
ax = em.plot(
x,
y_values,
marker,
show=False,
ax_kws=ax_kws,
ax=ax,
**kwargs
)
if show:
plt.show()
return ax | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/utils/visualizations.py | visualizations.py |
from ai4water.backend import np, plt
from see_rnn import scalefig
from see_rnn.inspect_gen import detect_nans
from see_rnn.utils import _kw_from_configs, _save_rnn_fig
def rnn_histogram(data, rnn_info, equate_axes=1, configs=None,
**kwargs):
"""Plots histogram grid of RNN weights/gradients by kernel, gate (if gated),
and direction (if bidirectional). Also detects NaNs and shows on plots.
Arguments:
rnn_info: dict
equate_axes: int: 0, 1, 2. 0 --> auto-managed axes. 1 --> kernel &
recurrent subplots' x- & y-axes lims set to common value.
2 --> 1, but lims shared for forward & backward plots.
Bias plot lims never affected.
data: np.ndarray. Pre-fetched RNN weights/gradients to plot directly - e.g.,
returned by `get_rnn_weights`. `rnn_info` is still needed to supply
the RNN-specific info (gates, directions, dimensions etc.).
configs: dict. kwargs to customize various plot schemes:
'plot': passed to ax.imshow(); ax = subplots axis
'subplot': passed to plt.subplots()
'tight': passed to fig.subplots_adjust(); fig = subplots figure
'title': passed to fig.suptitle()
'annot': passed to ax.annotate()
'annot-nan': passed to ax.annotate() for `nan_txt`
'save': passed to fig.savefig() if `savepath` is not None.
kwargs:
w: float. Scale width of resulting plot by a factor.
h: float. Scale height of resulting plot by a factor.
show_borders: bool. If True, shows boxes around plots.
show_xy_ticks: int/bool iter. Slot 0 -> x, Slot 1 -> y.
Ex: [1, 1] -> show both x- and y-ticks (and their labels).
[0, 0] -> hide both.
bins: int. Pyplot `hist` kwarg: number of histogram bins per subplot.
savepath: str/None. Path to save resulting figure to. Also see `configs`.
If None, doesn't save.
Returns:
(subplots_figs, subplots_axes) of generated subplots. If layer is
bidirectional, len(subplots_figs) == 2, and latter's is also doubled.
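Example:
(an illustrative sketch; `weights` and `rnn_info` are assumed to be prepared
beforehand, e.g. weights fetched from an LSTM layer and a dict holding keys
like 'rnn_type', 'n_gates', 'gate_names', 'direction_names', 'rnn_dim' and
'uses_bias')
>>> figs, axes = rnn_histogram(weights, rnn_info, bins=100)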
"""
w, h = kwargs.get('w', 1), kwargs.get('h', 1)
show_borders = kwargs.get('show_borders', False)
show_xy_ticks = kwargs.get('show_xy_ticks', [1, 1])
show_bias = kwargs.get('show_bias', True)
bins = kwargs.get('bins', 150)
savepath = kwargs.get('savepath', False)
show = kwargs.get('show', False)
def _process_configs(configs, w, h, equate_axes):
defaults = {
'plot': dict(),
'subplot': dict(sharex=True, sharey=True, dpi=76, figsize=(9, 9)),
'tight': dict(),
'title': dict(weight='bold', fontsize=12, y=1.05),
'annot': dict(fontsize=12, weight='bold',
xy=(.90, .93), xycoords='axes fraction'),
'annot-nan': dict(fontsize=12, weight='bold', color='red',
xy=(.05, .63), xycoords='axes fraction'),
'save': dict(),
}
# deepcopy configs, and override defaults dicts or dict values
kw = _kw_from_configs(configs, defaults)
if not equate_axes:
kw['subplot'].update({'sharex': False, 'sharey': False})
size = kw['subplot']['figsize']
kw['subplot']['figsize'] = (size[0] * w, size[1] * h)
return kw
def _catch_unknown_kwargs(kwargs):
allowed_kwargs = ('w', 'h', 'show_borders', 'show_xy_ticks', 'show_bias',
'bins', 'savepath', 'show')
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise Exception("unknown kwarg `%s`" % kwarg)
def _detect_and_zero_nans(matrix_data):
nan_txt = detect_nans(matrix_data, include_inf=True)
if nan_txt is not None: # NaN/Inf detected
matrix_data[np.isnan(matrix_data)] = 0 # set NaNs to zero
matrix_data[np.isinf(matrix_data)] = 0 # set Infs to zero
if ', ' in nan_txt:
nan_txt = '\n'.join(nan_txt.split(', '))
else:
nan_txt = '\n'.join(nan_txt.split(' '))
return matrix_data, nan_txt
def _plot_bias(data, axes, direction_idx, bins, d, kw):
gs = axes[0, 0].get_gridspec()
for ax in axes[-1, :]:
ax.remove()
axbig = fig.add_subplot(gs[-1, :])
matrix_data = data[2 + direction_idx * 3].ravel()
matrix_data, nan_txt = _detect_and_zero_nans(matrix_data)
_pretty_hist(matrix_data, bins, ax=axbig)
d['gate_names'].append('BIAS')
_style_axis(axbig, gate_idx=-1, kernel_type=None, nan_txt=nan_txt,
show_borders=show_borders, d=d, kw=kw)
for ax in axes[-2, :].flat:
# display x labels on the row above the bias plot, since the bias row
# does not share axes and its limits may differ
ax.tick_params(axis='both', which='both', labelbottom=True)
def _pretty_hist(matrix_data, bins, ax):
# hist w/ looping gradient coloring & nan detection
N, bins, patches = ax.hist(matrix_data, bins=bins, density=True)
if len(matrix_data) < 1000:
return # fewer bins look better monochrome
bins_norm = bins / bins.max()
n_loops = 8 # number of gradient loops
alpha = 0.94 # graph opacity
for bin_norm, patch in zip(bins_norm, patches):
grad = np.sin(np.pi * n_loops * bin_norm) / 15 + .04
color = (0.121569 + grad * 1.2, 0.466667 + grad, 0.705882 + grad,
alpha) # [.121569, .466667, ...] == matplotlib default blue
patch.set_facecolor(color)
def _get_axes_extrema(axes):
axes = np.array(axes)
is_bidir = len(axes.shape) == 3 and axes.shape[0] != 1
x_new, y_new = [], []
for direction_idx in range(1 + is_bidir):
axis = np.array(axes[direction_idx]).T
for type_idx in range(2): # 2 == len(kernel_types)
x_new += [np.max(np.abs([ax.get_xlim() for ax in axis[type_idx]]))]
y_new += [np.max(np.abs([ax.get_ylim() for ax in axis[type_idx]]))]
return max(x_new), max(y_new)
def _set_axes_limits(axes, x_new, y_new, d):
axes = np.array(axes)
is_bidir = len(axes.shape) == 3 and axes.shape[0] != 1
for direction_idx in range(1 + is_bidir):
axis = np.array(axes[direction_idx]).T
for type_idx in range(2):
for gate_idx in range(d['n_gates']):
axis[type_idx][gate_idx].set_xlim(-x_new, x_new)
axis[type_idx][gate_idx].set_ylim(0, y_new)
def _style_axis(ax, gate_idx, kernel_type, nan_txt, show_borders, d, kw):
if nan_txt is not None:
ax.annotate(nan_txt, **kw['annot-nan'])
is_gated = d['rnn_type'] in gated_types
if gate_idx == 0:
title = kernel_type + ' GATES' * is_gated
ax.set_title(title, weight='bold')
if is_gated:
ax.annotate(d['gate_names'][gate_idx], **kw['annot'])
if not show_borders:
ax.set_frame_on(False)
if not show_xy_ticks[0]:
ax.set_xticks([])
if not show_xy_ticks[1]:
ax.set_yticks([])
def _get_plot_data(data, direction_idx, type_idx, gate_idx, d):
matrix_idx = type_idx + direction_idx * (2 + d['uses_bias'])
matrix_data = data[matrix_idx]
if d['rnn_type'] in d['gated_types']:
start = gate_idx * d['rnn_dim']
end = start + d['rnn_dim']
matrix_data = matrix_data[:, start:end]
return matrix_data.ravel()
def _make_subplots(show_bias, direction_name, d, kw):
if not (d['uses_bias'] and show_bias):
fig, axes = plt.subplots(d['n_gates'], 2, **kw['subplot'])
axes = np.atleast_2d(axes)
return fig, axes
n_rows = 2 * d['n_gates'] + 1
fig, axes = plt.subplots(n_rows, 2, **kw['subplot'])
# merge upper axes pairs to increase height ratio w.r.t. bias plot window
gs = axes[0, 0].get_gridspec()
for ax in axes[:(n_rows - 1)].flat:
ax.remove()
axbigs1, axbigs2 = [], []
for row in range(n_rows // 2):
start = 2 * row
end = start + 2
axbigs1.append(fig.add_subplot(gs[start:end, 0]))
axbigs2.append(fig.add_subplot(gs[start:end, 1]))
axes = np.vstack([np.array([axbigs1, axbigs2]).T, [*axes.flat[-2:]]])
if direction_name != []:
fig.suptitle(direction_name + ' LAYER', **kw['title'])
return fig, axes
kw = _process_configs(configs, w, h, equate_axes)
_catch_unknown_kwargs(kwargs)
# data, rnn_info = _process_rnn_args(model, _id, layer, input_data, labels,
# mode, data)
d = rnn_info
gated_types = ('LSTM', 'GRU', 'CuDNNLSTM', 'CuDNNGRU')
kernel_types = ('KERNEL', 'RECURRENT')
d.update({'gated_types': gated_types, 'kernel_types': kernel_types})
subplots_axes = []
subplots_figs = []
for direction_idx, direction_name in enumerate(d['direction_names']):
fig, axes = _make_subplots(show_bias, direction_name, d, kw)
subplots_axes.append(axes)
subplots_figs.append(fig)
for type_idx, kernel_type in enumerate(kernel_types):
for gate_idx in range(d['n_gates']):
ax = axes[gate_idx][type_idx]
matrix_data = _get_plot_data(data, direction_idx,
type_idx, gate_idx, d)
matrix_data, nan_txt = _detect_and_zero_nans(matrix_data)
_pretty_hist(matrix_data, bins=bins, ax=ax)
_style_axis(ax, gate_idx, kernel_type, nan_txt, show_borders,
d, kw)
if d['uses_bias'] and show_bias:
_plot_bias(data, axes, direction_idx, bins, d, kw)
if kw['tight']:
fig.subplots_adjust(**kw['tight'])
else:
fig.tight_layout()
if equate_axes == 2:
x_new, y_new = _get_axes_extrema(subplots_axes)
_set_axes_limits(subplots_axes, x_new, y_new, d)
for fig in subplots_figs:
scalefig(fig)
if savepath:
_save_rnn_fig(subplots_figs, savepath, kw['save'])
if show:
plt.show()
return subplots_figs, subplots_axes | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/utils/utils_from_see_rnn.py | utils_from_see_rnn.py |
from typing import Union
from ai4water.backend import easy_mpl as ep
from ai4water.backend import os, np, pd, plt
# TODO
# rank histogram, reliability diagram, ROC curve
class Plots(object):
# TODO initialize this class with at least path
def __init__(self, config, path=None, model=None):
self.path = path or os.path.join(os.getcwd(), "results")
self.ml_model = model
self.config = config
@property
def training_data(self, *args, **kwargs):
raise AttributeError
@property
def validation_data(self, *args, **kwargs):
raise AttributeError
@property
def test_data(self, *args, **kwargs):
raise AttributeError
@property
def in_cols(self):
return self.config['input_features']
@property
def out_cols(self):
return self.config['output_features']
def _imshow_3d(self,
activation,
lyr_name,
xticklabels=None,
save=True,
where='activations',
xlabel=None):
act_2d = []
for i in range(activation.shape[2]):
act_2d.append(activation[:, :, i])
activation_2d = np.concatenate(act_2d, axis=1)
self._imshow(activation_2d,
lyr_name + " Activations (3d of {})".format(activation.shape),
save,
lyr_name,
where=where,
xticklabels=xticklabels,
xlabel=xlabel)
return
def _imshow(self,
img,
label: str = '',
save=True,
fname=None,
interpolation: str = 'none',
where='',
rnn_args=None,
cmap=None,
show=False,
**kwargs):
assert np.ndim(img) == 2, "can not plot {} with shape {} and ndim {}".format(label, img.shape, np.ndim(img))
im = ep.imshow(img,
aspect="auto",
interpolation=interpolation,
cmap=cmap,
ax_kws=dict(
xlabel=kwargs.get('xlabel', 'inputs'),
title=label),
show=False)
axis = im.axes
if rnn_args is not None:
assert isinstance(rnn_args, dict)
rnn_dim = int(img.shape[1] / rnn_args['n_gates'])
[plt.axvline(rnn_dim * gate_idx - .5, linewidth=0.8, color='k')
for gate_idx in range(1, rnn_args['n_gates'])]
kwargs['xlabel'] = rnn_args['gate_names_str']
if "RECURRENT" in label.upper():
plt.ylabel("Hidden Units")
else:
plt.ylabel("Channel Units")
else:
axis.set_ylabel('Examples' if 'weight' not in label.lower() else '')
xlabels = kwargs.get('xticklabels', None)
if xlabels is not None:
if len(xlabels) < 30:
axis.set_xticklabels(xlabels, rotation=90)
plt.colorbar(im)
self.save_or_show(save, fname, where=where, show=show)
return
def plot1d(self,
array,
label: str = '',
show=False,
fname=None,
rnn_args=None,
where='',
**kwargs):
plt.close('all')
ax = ep.plot(array,
ax_kws=dict(title=label, xlabel="Examples"),
show=False,
**kwargs)
if rnn_args is not None:
assert isinstance(rnn_args, dict)
rnn_dim = int(array.shape[0] / rnn_args['n_gates'])
[plt.axvline(rnn_dim * gate_idx - .5, linewidth=0.5, color='k')
for gate_idx in range(1, rnn_args['n_gates'])]
plt.xlabel(rnn_args['gate_names_str'])
self.save_or_show(save=True, fname=fname, where=where, show=show)
return ax
def save_or_show(self, *args, **kwargs):
return save_or_show(self.path, *args, **kwargs)
def plot2d_act_for_a_sample(self, activations, sample=0, save: bool = False, name: str = None):
from ai4water.utils.visualizations import init_subplots
fig, axis = init_subplots(height=8)
# for idx, ax in enumerate(axis):
im = axis.imshow(activations[sample, :, :].transpose(), aspect='auto')
axis.set_xlabel('lookback')
axis.set_ylabel('inputs')
axis.set_title('Activations of all inputs at different lookbacks for sample ' + str(sample))
fig.colorbar(im)
self.save_or_show(save=save, fname=name + '_' + str(sample), where='path')
return
def plot1d_act_for_a_sample(self, activations, sample=0, save=False, name=None):
_, axis = plt.subplots()
for idx in range(self.lookback-1):
axis.plot(activations[sample, idx, :].transpose(), label='lookback '+str(idx))
axis.set_xlabel('inputs')
axis.set_ylabel('activation weight')
axis.set_title('Activations at different lookbacks for all inputs for sample ' + str(sample))
self.save_or_show(save=save, fname=name + '_' + str(sample), where='path')
return
def plot_train_data(self, how='3d', save=True, **kwargs):
x, _ = self.training_data(**kwargs)
self.plot_model_input_data(x, how=how, save=save, which='training')
return
def plot_val_data(self, how='3d', save=True, **kwargs):
x, y = self.validation_data(**kwargs)
self.plot_model_input_data(x, how=how, save=save, which='validation')
return
def plot_test_data(self, how='3d', save=True, **kwargs):
x, _ = self.test_data(**kwargs)
self.plot_model_input_data(x, how=how, save=save, which='test')
return
def plot_model_input_data(self,
in_data: Union[list, np.ndarray],
how: str,
save: bool,
which: str = 'training'
) -> None:
assert how in ['3d', 'hist']
if not isinstance(in_data, list):
if isinstance(in_data, dict):
in_data = list(in_data.values())
else:
assert isinstance(in_data, np.ndarray)
in_data = [in_data]
for idx, inputs in enumerate(in_data):
if np.ndim(inputs) == 3:
if how.upper() == "3D":
self._imshow_3d(inputs, which + '_data_' + str(idx), save=save, where='data')
elif np.ndim(inputs) == 2:
if how.upper() == "3D":
self._imshow(inputs, save=save, fname=which + '_data_' + str(idx), where='data')
else:
self.plot_histogram(inputs,
save=save,
fname=which+'_data_' + str(idx),
features=self.in_cols,
where='data')
else:
print(f'skipping shape is {inputs.shape}')
return
def plot_histogram(self,
data: np.ndarray,
save: bool = True,
fname='hist',
features=None,
where='data'
):
assert data.ndim == 2
data = pd.DataFrame(data, columns=features)
data.hist(figsize=(12, 12))
self.save_or_show(save=save, fname=fname+'hist', where=where)
return
def validate_freq(df, freq):
assert isinstance(df.index, pd.DatetimeIndex), "index of dataframe must be pandas DatetimeIndex"
assert freq in ["weekly", "monthly",
"yearly"], f"freq must be one of {'weekly', 'monthly', 'yearly'} but it is {freq}"
return
def _get_nrows_and_ncols(n_subplots, n_rows=None):
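# e.g. _get_nrows_and_ncols(6) -> (2, 3) and _get_nrows_and_ncols(9) -> (3, 3);
# n_cols is reduced until both n_rows and n_cols divide n_subplots evenly.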
if n_rows is None:
n_rows = int(np.sqrt(n_subplots))
n_cols = max(int(n_subplots / n_rows), 1) # ensure n_cols != 0
n_rows = int(n_subplots / n_cols)
while not ((n_subplots / n_cols).is_integer() and
(n_subplots / n_rows).is_integer()):
n_cols -= 1
n_rows = int(n_subplots / n_cols)
return n_rows, n_cols
def save_or_show(path, save: bool = True, fname=None, where='', dpi=300, bbox_inches='tight', close=True,
show=False):
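# e.g. save_or_show(path="results", fname="losses", where="plots") writes
# results/plots/losses.png and closes the active matplotlib figure.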
if save:
assert isinstance(fname, str)
if "/" in fname:
fname = fname.replace("/", "__")
if ":" in fname:
fname = fname.replace(":", "__")
save_dir = os.path.join(path, where)
if not os.path.exists(save_dir):
assert os.path.dirname(where) in ['',
'activations',
'weights',
'plots', 'data', 'results'], f"unknown directory: {where}"
save_dir = os.path.join(path, where)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fname = os.path.join(save_dir, fname + ".png")
plt.savefig(fname, dpi=dpi, bbox_inches=bbox_inches)
if show:
plt.show()
elif close:
plt.close('all')
return
def to_1d_array(array_like) -> np.ndarray:
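# e.g. to_1d_array([1, 2, 3]), to_1d_array(np.array([[1], [2], [3]])) and
# to_1d_array(pd.Series([1, 2, 3])) all return a numpy array of shape (3,).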
if array_like.__class__.__name__ in ['list', 'tuple', 'Series']:
return np.array(array_like)
elif array_like.__class__.__name__ == 'ndarray':
if array_like.ndim == 1:
return array_like
else:
assert array_like.size == len(array_like), f'cannot convert multidim ' \
f'array of shape {array_like.shape} to 1d'
return array_like.reshape(-1, )
elif array_like.__class__.__name__ == 'DataFrame' and array_like.ndim == 2:
return array_like.values.reshape(-1,)
else:
raise ValueError(f'cannot convert object array {array_like.__class__.__name__} to 1d ') | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/utils/plotting_tools.py | plotting_tools.py |
import copy
import json
import pprint
import datetime
import warnings
from typing import Union, Any
from shutil import rmtree
from types import FunctionType
from typing import Tuple, List
import collections.abc as collections_abc
import scipy
from ai4water.backend import np, pd, plt, os
from easy_mpl import imshow
from scipy.stats import skew, kurtosis, variation, gmean, hmean
try:
import wrapt
except ModuleNotFoundError:
wrapt = None
MATRIC_TYPES = {
"r2": "max",
"nse": "max",
"r2_score": "max",
"kge": "max",
"corr_coeff": "max",
'accuracy': "max",
'f1_score': 'max',
"mse": "min",
"rmse": "min",
"mape": "min",
"nrmse": "min",
}
ERROR_LABELS = {
'r2': "$R^{2}$",
'nse': 'NSE',
'rmse': 'RMSE',
'mse': 'MSE',
'msle': 'MSLE',
'nrmse': 'Normalized RMSE',
'mape': 'MAPE',
'r2_score': "$R^{2}$ Score",
'mae': 'MAE',
'mase': 'MASE'
}
def reset_seed(seed: Union[int, None], os=None, random=None, np=None,
tf=None, torch=None):
"""
Sets the random seed for a given module if the module is not None
Arguments:
seed : Value of seed to set. If None, then it means we don't want to set
the seed.
os : alias for `os` module of python
random : alias for `random` module of python
np : alias for `numpy` module
tf : alias for `tensorflow` module.
torch : alias for `pytorch` module.
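Example:
a minimal sketch, passing the module aliases explicitly
>>> import os, random
>>> import numpy as np
>>> from ai4water.utils.utils import reset_seed
>>> reset_seed(313, os=os, random=random, np=np)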
"""
if seed:
if np:
np.random.seed(seed)
if random:
random.seed(seed)
if os:
os.environ['PYTHONHASHSEED'] = str(seed)
if tf:
if int(tf.__version__.split('.')[0]) == 1:
tf.compat.v1.random.set_random_seed(seed)
elif int(tf.__version__.split('.')[0]) > 1:
tf.random.set_seed(seed)
if torch:
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
return
def maybe_create_path(prefix=None, path=None):
if path is None:
save_dir = dateandtime_now()
model_dir = os.path.join(os.getcwd(), "results")
if prefix:
model_dir = os.path.join(model_dir, prefix)
save_dir = os.path.join(model_dir, save_dir)
else:
save_dir = path
if not os.path.exists(save_dir):
os.makedirs(save_dir)
for _dir in ['weights']:
if not os.path.exists(os.path.join(save_dir, _dir)):
os.makedirs(os.path.join(save_dir, _dir))
return save_dir
def dateandtime_now() -> str:
"""
Returns the datetime in following format as string
YYYYMMDD_HHMMSS
"""
jetzt = datetime.datetime.now()
dt = ''
for time in ['year', 'month', 'day', 'hour', 'minute', 'second']:
_time = str(getattr(jetzt, time))
if len(_time) < 2:
_time = '0' + _time
if time == 'hour':
_time = '_' + _time
dt += _time
return dt
def dict_to_file(
path,
config=None, errors=None,
indices=None, others=None, name=''):
sort_keys = True
if errors is not None:
suffix = dateandtime_now()
fpath = path + "/errors_" + name + suffix + ".json"
# maybe some errors are not json serializable.
for er_name, er_val in errors.items():
if "int" in er_val.__class__.__name__:
errors[er_name] = int(er_val)
elif "float" in er_val.__class__.__name__:
errors[er_name] = float(er_val)
data = errors
elif config is not None:
fpath = path + "/config.json"
data = config
sort_keys = False
elif indices is not None:
fpath = path + "/indices.json"
data = indices
else:
assert others is not None
data = others
fpath = path
if 'config' in data:
if data['config'].get('model', None) is not None:
model = data['config']['model']
# because ML args which come from algorithms may not be json serializable.
if 'layers' not in model:
model = jsonize(model)
data['config']['model'] = model
with open(fpath, 'w') as fp:
json.dump(data, fp, sort_keys=sort_keys, indent=4, cls=JsonEncoder)
return
def check_min_loss(epoch_losses, epoch, msg: str, save_fg: bool, to_save=None):
epoch_loss_array = epoch_losses[:-1]
current_epoch_loss = epoch_losses[-1]
if len(epoch_loss_array) > 0:
min_loss = np.min(epoch_loss_array)
else:
min_loss = current_epoch_loss
if np.less(current_epoch_loss, min_loss):
msg = msg + " {:10.5f} ".format(current_epoch_loss)
if to_save is not None:
save_fg = True
else:
msg = msg + " "
return msg, save_fg
def check_kwargs(**kwargs):
mode = "ML"
if kwargs.get('model', None) is not None:
model = kwargs['model']
if isinstance(model, dict):
if 'layers' in model:
is_custom_model=False
model_name = None
mode="DL"
else:
assert len(model)==1
_model = list(model.keys())[0]
if isinstance(_model, str):
model_name = _model
is_custom_model = False
elif hasattr(_model, '__call__'): # uninitiated class
check_attributes(_model, ['fit', 'predict', '__init__'])
model_name = _model.__name__
is_custom_model = True
else: # custom class is already initiated
check_attributes(_model, ['fit', 'predict'])
is_custom_model = True
model_name = _model.__class__.__name__
# for case when model='randomforestregressor'
elif isinstance(model, str):
kwargs['model'] = {model: {}}
is_custom_model = False
model_name = model
elif hasattr(model, '__call__'): # uninitiated class
check_attributes(model, ['fit', 'predict', '__init__'])
model_name = model.__name__
is_custom_model = True
kwargs['model'] = {model: {}}
else:
check_attributes(model, ['fit', 'predict'])
is_custom_model = True
model_name = model.__class__.__name__
kwargs['model'] = {model: {}}
if mode=="ML":
# for ML, default batches will be 2d unless the user specifies
# otherwise.
if "batches" not in kwargs:
kwargs["batches"] = "2d"
if "ts_args" not in kwargs:
kwargs["ts_args"] = {'lookback': 1,
'forecast_len': 1,
'forecast_step': 0,
'known_future_inputs': False,
'input_steps': 1,
'output_steps': 1}
else:
is_custom_model = False
model_name = None
if is_custom_model:
if 'mode' not in kwargs:
raise ValueError("""your must provide 'mode' keyword either as
mode='regression' or mode='classification' for custom models""")
return kwargs, model_name, is_custom_model
class make_model(object):
def __init__(self, **kwargs):
self.config, self.data_config, self.opt_paras, self.orig_model = _make_model(
**kwargs)
def process_io(**kwargs):
input_features = kwargs.get('input_features', None)
output_features = kwargs.get('output_features', None)
if isinstance(input_features, str):
input_features = [input_features]
if isinstance(output_features, str):
output_features = [output_features]
kwargs['input_features'] = input_features
kwargs['output_features'] = output_features
return kwargs
def _make_model(**kwargs):
"""
This function fills the default arguments needed to run all the models.
All the input arguments can be overwritten
by providing their name.
:return
nn_config: `dict`, contains parameters to build and train the neural network
such as `layers`
data_config: `dict`, contains parameters for data preparation/pre-processing/post-processing etc.
"""
kwargs = process_io(**kwargs)
kwargs, model_name, is_custom_model = check_kwargs(**kwargs)
model = kwargs.get('model', None)
def_cat = None
if model is not None:
if 'layers' in model:
def_cat = "DL"
# for DL, the default mode case will be regression
else:
def_cat = "ML"
accept_additional_args = False
if 'accept_additional_args' in kwargs:
accept_additional_args = kwargs.pop('accept_additional_args')
model_args = {
'model': {'type': dict, 'default': None, 'lower': None, 'upper': None, 'between': None},
# can be None or any of the methods defined in ai4water.utils.transformations.py
'x_transformation': {"type": [str, type(None), dict, list], "default": None, 'lower': None,
'upper': None, 'between': None},
'y_transformation': {"type": [str, type(None), dict, list], "default": None, 'lower': None,
'upper': None, 'between': None},
# for auto-encoders
'composite': {'type': bool, 'default': False, 'lower': None, 'upper': None, 'between': None},
'lr': {'type': float, 'default': 0.001, 'lower': None, 'upper': None, 'between': None},
# can be any of valid keras optimizers https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
'optimizer': {'type': str, 'default': 'adam', 'lower': None, 'upper': None, 'between': None},
'loss': {'type': [str, 'callable'], 'default': 'mse', 'lower': None, 'upper': None, 'between': None},
'quantiles': {'type': list, 'default': None, 'lower': None, 'upper': None, 'between': None},
'epochs': {'type': int, 'default': 14, 'lower': None, 'upper': None, 'between': None},
'min_val_loss': {'type': float, 'default': 0.0001, 'lower': None, 'upper': None, 'between': None},
'patience': {'type': int, 'default': 100, 'lower': None, 'upper': None, 'between': None},
'shuffle': {'type': bool, 'default': True, 'lower': None, 'upper': None, 'between': None},
# to save the best models using checkpoints
'save_model': {'type': bool, 'default': True, 'lower': None, 'upper': None, 'between': None},
'backend': {'type': None, 'default': 'tensorflow', 'lower': None, 'upper': None,
'between': ['tensorflow', 'pytorch']},
# buffer_size is only relevant if 'val_data' is same and shuffle is true.
# https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shuffle
# It is used to shuffle tf.Dataset of training data.
'buffer_size': {'type': int, 'default': 100, 'lower': None, 'upper': None, 'between': None},
# comes handy if we want to skip certain batches from last
'batches_per_epoch': {"type": int, "default": None, 'lower': None, 'upper': None, 'between': None},
# https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit
'steps_per_epoch': {"type": int, "default": None, 'lower': None, 'upper': None, 'between': None},
# can be string or list of strings such as 'mse', 'kge', 'nse', 'pbias'
'monitor': {"type": [list, type(None), str], "default": None, 'lower': None, 'upper': None, 'between': None},
# todo, is it redundant?
# If the model takes only one kind of input_features, i.e. it consists of
# only 1 Input layer, then the shape of the batches
# will be inferred from the Input layer. But when the model takes
# more than 1 Input, there can be two
# cases: either all the input_features are of the same shape or they
# are not. In the second case, we should overwrite the `train_paras`
# method. In the former case, define whether the batches are 2d or 3d. 3d
# means it is for an LSTM and 2d means it is
# for a Dense layer.
'batches': {"type": str, "default": '3d', 'lower': None, 'upper': None, 'between': ["2d", "3d"]},
'prefix': {"type": str, "default": None, 'lower': None, 'upper': None, 'between': None},
'path': {"type": str, "default": None, 'lower': None, 'upper': None, 'between': None},
'kmodel': {'type': None, "default": None, 'lower': None, 'upper': None, 'between': None},
'cross_validator': {'default': None, 'between': ['LeaveOneOut', 'kfold']},
'wandb_config': {'type': dict, 'default': None, 'between': None},
'val_metric': {'type': str, 'default': None},
'model_name_': {'default': None},
'is_custom_model_': {"default": None},
}
data_args = {
# if the shape of last batch is smaller than batch size and if we
# want to skip this last batch, set following to True.
# Useful if we have fixed batch size in our model but the number of samples is not fully divisible by batch size
'drop_remainder': {"type": bool, "default": False, 'lower': None, 'upper': None, 'between': [True, False]},
'category': {'type': str, 'default': def_cat, 'lower': None, 'upper': None, 'between': ["ML", "DL"]},
'mode': {'type': str, 'default': None, 'lower': None, 'upper': None,
'between': ["regression", "classification"]},
'batch_size': {"type": int, "default": 32, 'lower': None, 'upper': None, 'between': None},
'split_random': {'type': bool, 'default': False, 'between': [True, False]},
# fraction of data to be used for validation
'val_fraction': {"type": float, "default": 0.2, 'lower': None, 'upper': None, 'between': None},
# the following argument can be set to 'same' for cases where you want to use the same data for validation as well as
# test data. If it is 'same', then the same fraction/amount of data will be used for validation and test.
# If this is not a string and not None, this will overwrite `val_fraction`
'indices': {"type": dict, "default": None, 'lower': None, 'upper': None, 'between': ["same", None]},
# fraction of data to be used for training; the remainder is used for test
'train_fraction': {"type": float, "default": 0.7, 'lower': None, 'upper': None, 'between': None},
# write the data/batches as hdf5 file
'save': {"type": bool, "default": False, 'lower': None, 'upper': None, 'between': None},
'allow_nan_labels': {"type": int, "default": 0, 'lower': 0, 'upper': 2, 'between': None},
'nan_filler': {"type": None, "default": None, "lower": None, "upper": None, "between": None},
# for reproducibility
'seed': {"type": None, "default": 313, 'lower': None, 'upper': None, 'between': None},
# input features in data_frame
'input_features': {"type": None, "default": None, 'lower': None, 'upper': None, 'between': None},
# column in dataframe to be used as output/target
'output_features': {"type": None, "default": None, 'lower': None, 'upper': None, 'between': None},
# tuple of tuples where each tuple consists of two integers, marking the start and end
# of an interval. An interval here
# means chunks/rows from the input file/dataframe to be skipped when preparing
# data/batches for NN. This happens
# when we have for example some missing values at some time in our data.
# For further usage see `examples/using_intervals`
"intervals": {"type": None, "default": None, 'lower': None, 'upper': None, 'between': None},
'verbosity': {"type": int, "default": 1, 'lower': None, 'upper': None, 'between': None},
'teacher_forcing': {'type': bool, 'default': False, 'lower': None, 'upper': None, 'between': [True, False]},
'dataset_args': {'type': dict, 'default': {}},
'ts_args': {"type": dict, "default": {'lookback': 1,
'forecast_len': 1,
'forecast_step': 0,
'known_future_inputs': False,
'input_steps': 1,
'output_steps': 1}}
}
model_config = {key: val['default'] for key, val in model_args.items()}
config = {key: val['default'] for key, val in data_args.items()}
opt_paras = {}
# because there are two kinds of hpos which can be optimized
# some can be in model config and others are in main config
original_other_conf = {}
original_mod_conf = {}
for key, val in kwargs.items():
arg_name = key.lower() # todo, why this?
if val.__class__.__name__ in ['Integer', "Real", "Categorical"]:
opt_paras[key] = val
val2 = val
val = jsonize(val.rvs(1)[0])
val2.name = key
original_other_conf[key] = val2
if key == 'model':
val, _opt_paras, original_mod_conf = find_opt_paras_from_model_config(val)
opt_paras.update(_opt_paras)
if key == 'ts_args':
val, _opt_paras = find_opt_paras_from_ts_args(val)
opt_paras.update(_opt_paras)
if arg_name in model_config:
update_dict(arg_name, val, model_args, model_config)
elif arg_name in config:
update_dict(arg_name, val, data_args, config)
elif arg_name in ['x_transformer_', 'y_transformer_']:
config[arg_name] = val
# config may contain additional user defined args which will not be checked
elif not accept_additional_args:
raise ValueError(f"Unknown keyworkd argument '{key}' provided")
else:
config[key] = val
if config['allow_nan_labels'] > 0:
assert 'layers' in model_config['model'], f"""
The model does not appear to be deep learning based because
the argument `model` does not contain layers. But you are
allowing nan labels in the targets.
However, `allow_nan_labels` should be > 0 only for deep learning models
"""
config.update(model_config)
if isinstance(config['input_features'], dict):
for data in [config['input_features'], config['output_features']]:
for k, v in data.items():
assert isinstance(v, list), f"""
{k} is of type {v.__class__.__name__} but it must be of type list
{k}: {v}"""
_data_config = {}
for key, val in config.items():
if key in data_args:
_data_config[key] = val
config['model_name_'] = model_name
config['is_custom_model_'] = is_custom_model
return config, _data_config, opt_paras, {'model': original_mod_conf, 'other': original_other_conf}
def update_dict(key, val, dict_to_lookup, dict_to_update):
"""Updates the dictionary with key, val if the val is of type dtype."""
dtype = dict_to_lookup[key].get('type', None)
low = dict_to_lookup[key].get('lower', None)
up = dict_to_lookup[key].get('upper', None)
between = dict_to_lookup[key].get('between', None)
if dtype is not None:
if isinstance(dtype, list):
val_type = type(val)
if 'callable' in dtype:
if callable(val):
pass
elif val_type not in dtype:
raise TypeError("{} must be any of the type {} but it is of type {}"
.format(key, dtype, val.__class__.__name__))
elif not isinstance(val, dtype):
# the default value may be None which will be different than dtype
if val != dict_to_lookup[key]['default']:
raise TypeError(f"""
{key} must be of type {dtype} but it is of type {val.__class__.__name__}
{key}: {val}
""")
if isinstance(val, (int, float)):
if low is not None:
if val < low:
raise ValueError(f"""
The value '{val}' for '{key}' must be greater than '{low}'""")
if up is not None:
if val > up:
raise ValueError(f"""
The value '{val}' for '{key}' must be less than '{up}'""")
if isinstance(val, str):
if between is not None:
if val not in between:
raise ValueError(f"""
Unknown value '{val}' for '{key}'. It must be one of '{between}'""")
dict_to_update[key] = val
return
def deepcopy_dict_without_clone(d: dict) -> dict:
"""makes deepcopy of a dictionary without cloning it"""
assert isinstance(d, dict)
new_d = {}
for k, v in d.items():
if isinstance(v, dict):
new_d[k] = deepcopy_dict_without_clone(v)
elif hasattr(v, '__len__'):
new_d[k] = v[:]
else:
new_d[k] = copy.copy(v)
return new_d
def find_opt_paras_from_ts_args(ts_args:dict)->tuple:
opt_paras = {}
new_ts_args = {'lookback': 15,
'forecast_len': 1,
'forecast_step': 0,
'known_future_inputs': False,
'input_steps': 1,
'output_steps': 1}
new_ts_args.update(ts_args)
for k,v in ts_args.items():
if v.__class__.__name__ in ['Integer', 'Real', 'Categorical']:
if v.name is None or v.name.startswith("integer_") or v.name.startswith("real_"):
v.name = k
opt_paras[k] = v
v = v.rvs(1)[0]
new_ts_args[k] = v
return new_ts_args, opt_paras
def find_opt_paras_from_model_config(
config: Union[dict, str, None]
) -> Tuple[Union[dict, None, str], dict, Union[dict, str, None]]:
opt_paras = {}
if config is None or isinstance(config, str):
return config, opt_paras, config
assert isinstance(config, dict) and len(config) == 1
if 'layers' in config:
original_model_config, _ = process_config_dict(
deepcopy_dict_without_clone(config['layers']), False)
# it is a nn based model
new_lyrs_config, opt_paras = process_config_dict(config['layers'])
new_model_config = {'layers': new_lyrs_config}
else:
# it is a classical ml model
_ml_config = {}
ml_config: dict = list(config.values())[0]
model_name = list(config.keys())[0]
original_model_config, _ = process_config_dict(
copy.deepcopy(config[model_name]), False)
for k, v in ml_config.items():
if v.__class__.__name__ in ['Integer', 'Real', 'Categorical']:
if v.name is None or v.name.startswith("integer_") or v.name.startswith("real_"):
v.name = k
opt_paras[k] = v
v = v.rvs(1)[0]
_ml_config[k] = v
val = _ml_config
new_model_config = {model_name: val}
return new_model_config, opt_paras, original_model_config
def process_config_dict(config_dict: dict, update_initial_guess=True):
"""From a dicitonary defining structure of neural networks, this function
finds out which are hyperparameters from them"""
assert isinstance(config_dict, dict)
opt_paras = {}
def pd(d):
for k, v in d.items():
if isinstance(v, dict) and len(v) > 0:
pd(v)
elif v.__class__.__name__ in ["Integer", "Real", "Categorical"]:
if v.name is None or v.name.startswith("integer_") or v.name.startswith("real_"):
v.name = k
if v.name in opt_paras:
raise ValueError(f"""
Hyperparameter with duplicate name {v.name} found. A hyperparameter to be
optimized with name '{v.name}' already exists""")
opt_paras[v.name] = v
if update_initial_guess:
x0 = jsonize(v.rvs(1)[0]) # get initial guess
d[k] = x0 # inplace change of dictionary
else:
# we most probably have updated the name, so doing inplace change
d[k] = v
return
pd(config_dict)
return config_dict, opt_paras
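# Illustrative sketch (assuming the `Integer` space class from ai4water.hyperopt):
# >>> from ai4water.hyperopt import Integer
# >>> lyrs = {"Dense_0": {"units": Integer(8, 64, name="units")}, "Dense_1": {"units": 1}}
# >>> new_lyrs, spaces = process_config_dict(lyrs)
# >>> list(spaces.keys())     # the Integer space is detected as a hyperparameter
# ['units']
# >>> type(new_lyrs["Dense_0"]["units"])    # and replaced in-place by an initial guess
# <class 'int'>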
def update_model_config(config: dict, suggestions:dict)->dict:
"""returns the updated config if config contains any parameter from
suggestions."""
cc = copy.deepcopy(config)
def update(c):
for k, v in c.items():
if isinstance(v, dict):
update(v)
elif v.__class__.__name__ in ["Integer", "Real", "Categorical"]:
c[k] = suggestions[v.name]
return
update(cc)
return cc
def to_datetime_index(idx_array, fmt='%Y%m%d%H%M') -> pd.DatetimeIndex:
""" converts a numpy 1d array into pandas DatetimeIndex type."""
if not isinstance(idx_array, np.ndarray):
raise TypeError
idx = pd.to_datetime(idx_array.astype(str), format=fmt)
idx.freq = pd.infer_freq(idx)
return idx
def jsonize(
obj,
type_converters:dict=None
):
"""
Serializes an object to python's native types so that it can be saved
in json file format. If the object is a sequence, then each member of th sequence
is serialized. Same goes for nested sequences like lists of lists
or list of dictionaries.
Parameters
----------
obj :
any python object that needs to be serialized.
type_converters : dict
a dictionary defining how to serialize any particular type.
The keys of the dictionary should be a ``type`` and the values
should be a callable which serializes that type.
Return
------
a serialized python object
Examples
--------
>>> import numpy as np
>>> from ai4water.utils import jsonize
>>> a = np.array([2.0])
>>> b = jsonize(a)
>>> type(b)  # list
... # if a data container consists of mix of native and third party types
... # only third party types are converted into native types
>>> print(jsonize({1: [1, None, True, np.array(3)], 'b': np.array([1, 3])}))
... {1: [1, None, True, 3], 'b': [1, 3]}
The user can define the methods to serialize some types
e.g., we can serialize tensorflow's Lambda layer using keras' serialize method
>>> from tensorflow.keras.layers import Lambda, serialize
>>> tensor = Lambda(lambda _x: _x[Ellipsis, -1, :])
>>> jsonize({'my_tensor': tensor}, {Lambda: serialize})
"""
# boolean type
if isinstance(obj, bool):
return obj
if 'int' in obj.__class__.__name__:
return int(obj)
if 'float' in obj.__class__.__name__:
return float(obj)
if isinstance(obj, dict):
return {jsonize(k, type_converters): jsonize(v, type_converters) for k, v in obj.items()}
if isinstance(obj, tuple):
return tuple([jsonize(val, type_converters) for val in obj])
if obj.__class__.__name__ == 'NoneType':
return obj
# if obj is a python 'type' such as jsonize(list)
if type(obj).__name__ == type.__name__:
return obj.__name__
if hasattr(obj, '__len__') and not isinstance(obj, str):
if hasattr(obj, 'shape') and len(obj.shape) == 0:
# for cases such as np.array(1)
return jsonize(obj.item(), type_converters)
if obj.__class__.__name__ in ['Series', 'DataFrame']:
# simple list comprehension will iterate over only column names
# if we simply do jsonize(obj.values()), it will not save column names
return {jsonize(k, type_converters): jsonize(v, type_converters) for k,v in obj.items()}
return [jsonize(val, type_converters) for val in obj]
if callable(obj):
if isinstance(obj, FunctionType):
return obj.__name__
if hasattr(obj, '__package__'):
return obj.__package__
if isinstance(obj, collections_abc.Mapping):
return dict(obj)
if obj is Ellipsis:
return {'class_name': '__ellipsis__'}
if wrapt and isinstance(obj, wrapt.ObjectProxy):
return obj.__wrapped__
if type_converters:
for _type, converter in type_converters.items():
if isinstance(obj, _type):
return converter(obj)
# last resort, call the __str__ method of object on it
return str(obj)
def make_hpo_results(opt_dir, metric_name='val_loss') -> dict:
"""Looks in opt_dir and saves the min val_loss with the folder name"""
results = {}
for folder in os.listdir(opt_dir):
fname = os.path.join(os.path.join(opt_dir, folder), 'losses.csv')
if os.path.exists(fname):
df = pd.read_csv(fname)
if metric_name in df:
min_val_loss = round(float(np.nanmin(df[metric_name])), 6)
results[min_val_loss] = {'folder': os.path.basename(folder)}
return results
def find_best_weight(w_path: str,
best: str = "min",
ext: str = ".hdf5",
epoch_identifier: int = None):
"""
Given weights in w_path, find the best weight.
if epoch_identifier is given, it will be given priority to find best_weights
The file names are supposed to be in the following format: FileName_Epoch_Error.ext
Note: if we are monitoring two or more metrics whose desired behaviours
are opposite to each other, then this method does not work as desired. However,
this can be avoided by specifying `epoch_identifier`.
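Example (with hypothetical file names following the expected pattern):
>>> # suppose w_path contains weights_150_0.00056.hdf5 and weights_210_0.00091.hdf5
>>> find_best_weight(w_path, best="min")
'weights_150_0.00056.hdf5'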
"""
assert best in ['min', 'max']
all_weights = os.listdir(w_path)
if len(all_weights) == 1:
return all_weights[0]
losses = {}
for w in all_weights:
wname = w.split(ext)[0]
try:
# converting to float so that trailing 0 is removed
val_loss = str(float(wname.split('_')[2]))
except (ValueError, IndexError) as e:
raise ValueError(f"while trying to find best weight in {w_path} with {best} and"
f" {ext} and {epoch_identifier} wname: {wname}"
f" encountered following error \n{e}")
losses[val_loss] = {'loss': wname.split('_')[2], 'epoch': wname.split('_')[1]}
best_weight = None
if epoch_identifier:
for v in losses.values():
if str(epoch_identifier) in v['epoch']:
best_weight = f"weights_{v['epoch']}_{v['loss']}.hdf5"
break
else:
loss_array = np.array([float(l) for l in losses.keys()])
if len(loss_array) == 0:
return None
best_loss = getattr(np, best)(loss_array)
best_weight = f"weights_{losses[str(best_loss)]['epoch']}_{losses[str(best_loss)]['loss']}.hdf5"
return best_weight
def add_folder(opt_dir: str, results: dict)->Union[dict, None]:
folders = [file for file in os.listdir(opt_dir) if os.path.isdir(os.path.join(opt_dir, file))]
num_folders = len(folders)
results_with_folders = results.copy()
if num_folders != len(results):
warnings.warn(f"{num_folders} is not equal to {len(results)} so can not perform ranking")
return
for idx, (k, v) in enumerate(results.items()):
v['folder'] = folders[idx]
results_with_folders[k] = v
return results_with_folders
def remove_all_but_best_weights(w_path, best: str = "min", ext: str = ".hdf5"):
"""removes all the weights from a folder except the best weigtht"""
best_weights = None
if os.path.exists(w_path):
# remove all but best weight
all_weights = os.listdir(w_path)
best_weights = find_best_weight(w_path, best=best, ext=ext)
ws_to_del = [w for w in all_weights if w != best_weights]
for w in ws_to_del:
os.remove(os.path.join(w_path, w))
return best_weights
def clear_weights(
opt_dir:str,
results: dict,
keep:int = None,
rename:bool = True,
write:bool = True
):
"""Optimization will save weights of all the trained models, not all of them
are useful. Here removing weights of all except top 10%. The number of models
whose weights to be retained can be set by `keep` para.
"""
# each value of results is a dictionary which will have 'folders' key/value
# pair added to it, original results dictionary should not be modified.
results = {k:v.copy() for k,v in results.items()}
if 'folder' not in list(results.values())[0]:
results = add_folder(opt_dir, results)
if results is None:
return
if keep is None:
keep = int(len(results) * 0.1)
keep = max(keep, 3)
fname = 'sorted.json'
d = {k: v['y'] for k, v in results.items()}
sorted_iters: list = sorted(d, key=d.get)
# sort a results based on a sorted_iters
results = dict(sorted(results.items(), key=lambda pair: sorted_iters.index(pair[0])))
best_results = {}
for idx, v in enumerate(results.values()):
folder = v['folder']
_path = os.path.join(opt_dir, folder)
w_path = os.path.join(_path, 'weights')
if idx > keep-1:
if os.path.exists(w_path):
rmtree(w_path)
else:
best_weights = remove_all_but_best_weights(w_path)
best_results[folder] = {'path': _path, 'weights': best_weights}
if rename:
rank_folders(opt_dir, results, best_results)
results = {k: jsonize(v) for k, v in results.items()}
if write:
sorted_fname = os.path.join(opt_dir, fname)
with open(sorted_fname, 'w') as sfp:
json.dump(results, sfp, sort_keys=True, indent=True)
return best_results
def rank_folders(opt_dir, results, best_results):
# append ranking of models to folder_names
for idx, v in enumerate(results.values()):
folder = v['folder']
old_path = os.path.join(opt_dir, folder)
new_path = os.path.join(opt_dir, str(idx + 1) + "_" + folder)
os.rename(old_path, new_path)
if folder in best_results:
best_results[folder] = {'path': new_path, 'weights': best_results[folder]}
return
class TrainTestSplit(object):
"""
train_test_split of sklearn can not be used for list of arrays so here
we go
Examples
---------
>>> import numpy as np
>>> from ai4water.utils.utils import TrainTestSplit
>>> x1 = np.random.random((100, 10, 4))
>>> x2 = np.random.random((100, 4))
>>> x = [x1, x2]
>>> y = np.random.random(100)
...
>>> train_x, test_x, train_y, test_y = TrainTestSplit().split_by_random(x, y)
>>> # works as well when only a single array i.e. is provided
>>> train_x, test_x, _, _ = TrainTestSplit().split_by_random(x)
... # if we have a time-series like data, where we want to use earlier samples
... # for training and later samples for test then we can do slice based
>>> train_x, test_x, train_y, test_y = TrainTestSplit().split_by_slicing(x, y)
"""
def __init__(
self,
test_fraction: float = 0.3,
seed : int = None,
train_indices: Union[list, np.ndarray] = None,
test_indices: Union[list, np.ndarray] = None
):
"""
test_fraction:
test fraction. Must be greater than 0. and less than 1.
seed:
random seed for reproducibility
"""
self.test_fraction = test_fraction
self.random_state = np.random.RandomState(seed=seed)
self.train_indices = train_indices
self.test_indices = test_indices
def split_by_slicing(
self,
x: Union[list, np.ndarray, pd.Series, pd.DataFrame, List[np.ndarray]],
y: Union[list, np.ndarray, pd.Series, pd.DataFrame, List[np.ndarray]]=None,
):
"""splits the x and y by slicing which is defined by `test_fraction`
Arguments:
x:
arrays to split
- array like such as list, numpy array or pandas dataframe/series
- list of array like objects
y:
array like
- array like such as list, numpy array or pandas dataframe/series
- list of array like objects
"""
def split_arrays(array):
if isinstance(array, list):
# x is list of arrays
# assert that all arrays are of equal length
assert len(set([len(_array) for _array in array])) == 1, "arrays are not of the same length"
split_at = int(array[0].shape[0] * (1. - self.test_fraction))
else:
split_at = int(len(array) * (1. - self.test_fraction))
train, test = (self.slice_arrays(array, 0, split_at), self.slice_arrays(array, split_at))
return train, test
train_x, test_x = split_arrays(x)
if y is not None:
train_y, test_y = split_arrays(y)
else:
train_y, test_y = [], []
return train_x, test_x, train_y, test_y
def split_by_random(
self,
x: Union[list, np.ndarray, pd.Series, pd.DataFrame, List[np.ndarray]],
y: Union[list, np.ndarray, pd.Series, pd.DataFrame, List[np.ndarray]]=None,
)->Tuple[Any, Any, Any, Any]:
"""
splits the x and y by random splitting.
Arguments:
x:
arrays to split
- array like such as list, numpy array or pandas dataframe/series
- list of array like objects
y:
array like
- array like such as list, numpy array or pandas dataframe/series
- list of array like objects
"""
if isinstance(x, list):
indices = np.arange(len(x[0]))
else:
indices = np.arange(len(x))
indices = self.random_state.permutation(indices)
split_at = int(len(indices) * (1. - self.test_fraction))
train_indices, test_indices = (self.slice_arrays(indices, 0, split_at),
self.slice_arrays(indices, split_at))
train_x = self.slice_with_indices(x, train_indices)
train_y = self.slice_with_indices(y, train_indices)
test_x = self.slice_with_indices(x, test_indices)
test_y = self.slice_with_indices(y, test_indices)
return train_x, test_x, train_y, test_y
def split_by_indices(
self,
x: Union[list, np.ndarray, pd.Series, pd.DataFrame, List[np.ndarray]],
y: Union[list, np.ndarray, pd.Series, pd.DataFrame, List[np.ndarray]]=None,
):
"""splits the x and y by user defined `train_indices` and `test_indices`"""
return self.slice_with_indices(x, self.train_indices), \
self.slice_with_indices(x, self.test_indices), \
self.slice_with_indices(y, self.train_indices), \
self.slice_with_indices(y, self.test_indices)
@staticmethod
def slice_with_indices(array, indices):
if array is None:
return []
if isinstance(array, list):
data = []
for d in array:
if isinstance(d, (pd.Series, pd.DataFrame)):
data.append(d.iloc[indices])
else:
assert isinstance(d, (np.ndarray, pd.DatetimeIndex))
data.append(d[indices])
else:
if isinstance(array, (pd.DataFrame, pd.Series)):
data = array.iloc[indices]
else:
assert isinstance(array, (np.ndarray, pd.DatetimeIndex))
data = array[indices]
return data
@staticmethod
def slice_arrays(arrays, start, stop=None):
if isinstance(arrays, list):
return [array[start:stop] for array in arrays]
elif hasattr(arrays, 'shape'):
return arrays[start:stop]
def KFold(
self,
x,
y,
n_splits,
shuffle=True,
**kwargs
):
from sklearn.model_selection import KFold
kf = KFold(n_splits=n_splits,
random_state=self.random_state,
shuffle=shuffle)
spliter = kf.split(x[0] if isinstance(x, list) else x)
return self.yield_splits(x, y, spliter)
@staticmethod
def yield_splits(x, y, spliter):
for tr_idx, test_idx in spliter:
if isinstance(x, list):
train_x = [xarray[tr_idx] for xarray in x]
test_x = [xarray[test_idx] for xarray in x]
else:
train_x = x[tr_idx]
test_x = x[test_idx]
if isinstance(y, list):
train_y = [yarray[tr_idx] for yarray in y]
test_y = [yarray[test_idx] for yarray in y]
else:
train_y = y[tr_idx]
test_y = y[test_idx]
yield (train_x, train_y), (test_x, test_y)
def TimeSeriesSplit(
self,
x,
y,
**kwargs
):
from sklearn.model_selection import TimeSeriesSplit
kf = TimeSeriesSplit(**kwargs)
spliter = kf.split(x[0] if isinstance(x, list) else x)
return self.yield_splits(x, y, spliter)
def ShuffleSplit(
self,
x,
y,
*args,
**kwargs
):
from sklearn.model_selection import ShuffleSplit
kf = ShuffleSplit(*args, **kwargs)
spliter = kf.split(x[0] if isinstance(x, list) else x)
return self.yield_splits(x, y, spliter)
def LeaveOneOut(
self,
x,
y,
**kwargs
):
from sklearn.model_selection import LeaveOneOut
kf = LeaveOneOut()
spliter = kf.split(x[0] if isinstance(x, list) else x)
return self.yield_splits(x, y, spliter)
def ts_features(data: Union[np.ndarray, pd.DataFrame, pd.Series],
precision: int = 3,
name: str = '',
st: int = 0,
en: int = None,
features: Union[list, str] = None
) -> dict:
"""
Extracts features from 1d time series data. Features can be
* point, one integer or floating point value, for example mean
* 1D, 1D array, for example sin(data)
* 2D, 2D array, for example wavelet transform
Gets all the possible stats about an array like object `data`.
Arguments:
data: array like
precision: number of significant figures
name: str, only used for error or warning messages
st: str/int, starting index of data to be considered.
en: str/int, end index of data to be considered.
features: name/names of features to extract from data.
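Example:
>>> import numpy as np
>>> ts_features(np.arange(10.0), features=["Mean", "Max"])
{'Mean': 4.5, 'Max': 9.0}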
"""
point_features = {
'Skew': skew,
'Kurtosis': kurtosis,
'Mean': np.nanmean,
'Geometric Mean': gmean,
'Standard error of mean': scipy.stats.sem,
'Median': np.nanmedian,
'Variance': np.nanvar,
'Coefficient of Variation': variation,
'Std': np.nanstd,
'Non Zeros': np.count_nonzero,
'Min': np.nanmin,
'Max': np.nanmax,
'Sum': np.nansum,
'Counts': np.size
}
point_features_lambda = {
'Shannon entropy': lambda x: np.round(scipy.stats.entropy(pd.Series(x).value_counts()), precision),
'Negative counts': lambda x: int(np.sum(x < 0.0)),
'90th percentile': lambda x: np.round(np.nanpercentile(x, 90), precision),
'75th percentile': lambda x: np.round(np.nanpercentile(x, 75), precision),
'50th percentile': lambda x: np.round(np.nanpercentile(x, 50), precision),
'25th percentile': lambda x: np.round(np.nanpercentile(x, 25), precision),
'10th percentile': lambda x: np.round(np.nanpercentile(x, 10), precision),
}
if not isinstance(data, np.ndarray):
if hasattr(data, '__len__'):
data = np.array(data)
else:
raise TypeError(f"{name} must be array like but it is of type {data.__class__.__name__}")
if np.array(data).dtype.type is np.str_:
warnings.warn(f"{name} contains string values")
return {}
if 'int' not in data.dtype.name:
if 'float' not in data.dtype.name:
warnings.warn(f"changing the dtype of {name} from {data.dtype.name} to float")
data = data.astype(np.float64)
assert data.size == len(data), f"""
data must be 1 dimensional array but it has shape {np.shape(data)}
"""
data = data[st:en]
stats = dict()
if features is None:
features = list(point_features.keys()) + list(point_features_lambda.keys())
elif isinstance(features, str):
features = [features]
for feat in features:
if feat in point_features:
stats[feat] = np.round(point_features[feat](data), precision)
elif feat in point_features_lambda:
stats[feat] = point_features_lambda[feat](data)
if 'Harmonic Mean' in features:
try:
stats['Harmonic Mean'] = np.round(hmean(data), precision)
except ValueError:
warnings.warn(f"""Unable to calculate Harmonic mean for {name}. Harmonic mean only defined if all
elements are greater than or equal to zero""", UserWarning)
return jsonize(stats)
def prepare_data(
data: np.ndarray,
lookback: int,
num_inputs: int = None,
num_outputs: int = None,
input_steps: int = 1,
forecast_step: int = 0,
forecast_len: int = 1,
known_future_inputs: bool = False,
output_steps: int = 1,
mask: Union[int, float, np.ndarray] = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
converts a numpy nd array into a supervised machine learning problem.
Parameters
----------
data :
nd numpy array whose first dimension represents the number
of examples and the second dimension represents the number of features.
Some of those features will be used as inputs and some will be considered
as outputs depending upon the values of `num_inputs` and `num_outputs`.
lookback :
number of previous steps/values to be used at one step.
num_inputs :
default None, number of input features in data. If None,
it will be calculated as features-outputs. The input data will be all
from start till num_outputs in second dimension.
num_outputs :
number of columns (from last) in data to be used as output.
        If None, it will be calculated as features-inputs.
input_steps:
strides/number of steps in input data
forecast_step :
        must be greater than or equal to 0. It determines which t+i th value
        to use as target, where i is the horizon. For time series prediction,
        we can say, which horizon to predict.
forecast_len :
number of horizons/future values to predict.
known_future_inputs :
Only useful if `forecast_len`>1. If True, this
means, we know and use 'future inputs' while making predictions at t>0
output_steps :
step size in outputs. If =2, it means we want to predict
every second value from the targets
mask :
If int, then the examples with these values in
the output will be skipped. If array then it must be a boolean mask
indicating which examples to include/exclude. The length of mask should
        be equal to the number of generated examples. The number of generated
        examples is difficult to predict beforehand because it depends upon lookback,
        input_steps, and forecast_step. Thus it is better to provide an integer indicating
which values in outputs are to be considered as invalid. Default is
None, which indicates all the generated examples will be returned.
Returns
-------
x : numpy array of shape (examples, lookback, ins) consisting of
input examples
prev_y : numpy array consisting of previous outputs
y : numpy array consisting of target values
Given following data consisting of input/output pairs
+--------+--------+---------+---------+----------+
| input1 | input2 | output1 | output2 | output 3 |
+========+========+=========+=========+==========+
| 1 | 11 | 21 | 31 | 41 |
+--------+--------+---------+---------+----------+
| 2 | 12 | 22 | 32 | 42 |
+--------+--------+---------+---------+----------+
| 3 | 13 | 23 | 33 | 43 |
+--------+--------+---------+---------+----------+
| 4 | 14 | 24 | 34 | 44 |
+--------+--------+---------+---------+----------+
| 5 | 15 | 25 | 35 | 45 |
+--------+--------+---------+---------+----------+
| 6 | 16 | 26 | 36 | 46 |
+--------+--------+---------+---------+----------+
| 7 | 17 | 27 | 37 | 47 |
+--------+--------+---------+---------+----------+
If we use following 2 time series as input
+--------+--------+
| input1 | input2 |
+========+========+
| 1 | 11 |
+--------+--------+
| 2 | 12 |
+--------+--------+
| 3 | 13 |
+--------+--------+
| 4 | 14 |
+--------+--------+
| 5 | 15 |
+--------+--------+
| 6 | 16 |
+--------+--------+
| 7 | 17 |
+--------+--------+
then ``num_inputs`` =2, ``lookback`` =7, ``input_steps`` =1
and if we want to predict
+---------+---------+----------+
| output1 | output2 | output 3 |
+=========+=========+==========+
| 27 | 37 | 47 |
+---------+---------+----------+
then ``num_outputs`` =3, ``forecast_len`` =1, ``forecast_step`` =0,
if we want to predict
+---------+---------+----------+
| output1 | output2 | output 3 |
+=========+=========+==========+
| 28 | 38 | 48 |
+---------+---------+----------+
then ``num_outputs`` =3, ``forecast_len`` =1, ``forecast_step`` =1,
if we want to predict
+---------+---------+----------+
| output1 | output2 | output 3 |
+=========+=========+==========+
| 27 | 37 | 47 |
+---------+---------+----------+
| 28 | 38 | 48 |
+---------+---------+----------+
then ``num_outputs`` =3, ``forecast_len`` =2, horizon/forecast_step=0,
if we want to predict
+---------+---------+----------+
| output1 | output2 | output 3 |
+=========+=========+==========+
| 28 | 38 | 48 |
+---------+---------+----------+
| 29 | 39 | 49 |
+---------+---------+----------+
| 30 | 40 | 50 |
+---------+---------+----------+
then ``num_outputs`` =3, ``forecast_len`` =3, ``forecast_step`` =1,
if we want to predict
+---------+
| output2 |
+=========+
| 38 |
+---------+
| 39 |
+---------+
| 40 |
+---------+
then ``num_outputs`` =1, ``forecast_len`` =3, ``forecast_step`` =0
if we predict
+---------+
| output2 |
+=========+
| 39 |
+---------+
then ``num_outputs`` =1, ``forecast_len`` =1, ``forecast_step`` =2
if we predict
+---------+
| output2 |
+=========+
| 39 |
+---------+
| 40 |
+---------+
| 41 |
+---------+
then ``num_outputs`` =1, ``forecast_len`` =3, ``forecast_step`` =2
If we use following two time series as input
+--------+--------+
|input1 | input2 |
+========+========+
| 1 | 11 |
+--------+--------+
| 3 | 13 |
+--------+--------+
| 5 | 15 |
+--------+--------+
| 7 | 17 |
+--------+--------+
then ``num_inputs`` =2, ``lookback`` =4, ``input_steps`` =2
If the input is
+--------+--------+
| input1 | input2 |
+========+========+
| 1 | 11 |
+--------+--------+
| 2 | 12 |
+--------+--------+
| 3 | 13 |
+--------+--------+
| 4 | 14 |
+--------+--------+
| 5 | 15 |
+--------+--------+
| 6 | 16 |
+--------+--------+
| 7 | 17 |
+--------+--------+
and target/output is
+---------+---------+----------+
| output1 | output2 | output 3 |
+=========+=========+==========+
| 25 | 35 | 45 |
+---------+---------+----------+
| 26 | 36 | 46 |
+---------+---------+----------+
| 27 | 37 | 47 |
+---------+---------+----------+
This means we make use of ``known future inputs``. This can be achieved using
following configuration
num_inputs=2, num_outputs=3, lookback=4, forecast_len=3, forecast_step=1, known_future_inputs=True
The general shape of output/target/label is
(examples, num_outputs, forecast_len)
The general shape of inputs/x is
    (examples, lookback, num_inputs); if ``known_future_inputs`` is True the
    second dimension becomes lookback + forecast_len
Examples:
>>> import numpy as np
>>> from ai4water.utils.utils import prepare_data
>>> num_examples = 50
>>> dataframe = np.arange(int(num_examples*5)).reshape(-1, num_examples).transpose()
>>> dataframe[0:10]
array([[ 0, 50, 100, 150, 200],
[ 1, 51, 101, 151, 201],
[ 2, 52, 102, 152, 202],
[ 3, 53, 103, 153, 203],
[ 4, 54, 104, 154, 204],
[ 5, 55, 105, 155, 205],
[ 6, 56, 106, 156, 206],
[ 7, 57, 107, 157, 207],
[ 8, 58, 108, 158, 208],
[ 9, 59, 109, 159, 209]])
>>> x, prevy, y = prepare_data(dataframe, num_outputs=2, lookback=4,
... input_steps=2, forecast_step=2, forecast_len=4)
>>> x[0]
array([[ 0., 50., 100.],
[ 2., 52., 102.],
[ 4., 54., 104.],
[ 6., 56., 106.]], dtype=float32)
>>> y[0]
array([[158., 159., 160., 161.],
[208., 209., 210., 211.]], dtype=float32)
>>> x, prevy, y = prepare_data(dataframe, num_outputs=2, lookback=4,
... forecast_len=3, known_future_inputs=True)
>>> x[0]
array([[ 0, 50, 100],
[ 1, 51, 101],
[ 2, 52, 102],
[ 3, 53, 103],
[ 4, 54, 104],
[ 5, 55, 105],
[ 6, 56, 106]]) # (7, 3)
    >>> # note that although lookback=4, x[0] has shape 7 (= lookback + forecast_len) because known_future_inputs=True
>>> y[0]
array([[154., 155., 156.],
[204., 205., 206.]], dtype=float32) # (2, 3)
"""
if not isinstance(data, np.ndarray):
if isinstance(data, pd.DataFrame):
data = data.values
else:
raise TypeError(f"unknown data type for data {data.__class__.__name__}")
if num_inputs is None and num_outputs is None:
raise ValueError("""
Either of num_inputs or num_outputs must be provided.
""")
features = data.shape[1]
if num_outputs is None:
num_outputs = features - num_inputs
if num_inputs is None:
num_inputs = features - num_outputs
assert num_inputs + num_outputs == features, f"""
num_inputs {num_inputs} + num_outputs {num_outputs} != total features {features}"""
if len(data) <= 1:
raise ValueError(f"Can not create batches from data with shape {data.shape}")
time_steps = lookback
if known_future_inputs:
lookback = lookback + forecast_len
assert forecast_len > 1, f"""
        known_future_inputs should be True only when making predictions at multiple
horizons i.e. when forecast length/number of horizons to predict is > 1.
known_future_inputs: {known_future_inputs}
forecast_len: {forecast_len}"""
examples = len(data)
x = []
prev_y = []
y = []
for i in range(examples - lookback * input_steps + 1 - forecast_step - forecast_len + 1):
stx, enx = i, i + lookback * input_steps
x_example = data[stx:enx:input_steps, 0:features - num_outputs]
st, en = i, i + (lookback - 1) * input_steps
y_data = data[st:en:input_steps, features - num_outputs:]
sty = (i + time_steps * input_steps) + forecast_step - input_steps
eny = sty + forecast_len
target = data[sty:eny, features - num_outputs:]
x.append(np.array(x_example))
prev_y.append(np.array(y_data))
y.append(np.array(target))
x = np.stack(x)
prev_y = np.array([np.array(i, dtype=np.float32) for i in prev_y], dtype=np.float32)
# transpose because we want labels to be of shape (examples, outs, forecast_len)
y = np.array([np.array(i, dtype=np.float32).T for i in y], dtype=np.float32)
if mask is not None:
if isinstance(mask, np.ndarray):
assert mask.ndim == 1
assert len(x) == len(mask), f"Number of generated examples are {len(x)} " \
f"but the length of mask is {len(mask)}"
elif isinstance(mask, float) and np.isnan(mask):
mask = np.invert(np.isnan(y))
mask = np.array([all(i.reshape(-1,)) for i in mask])
else:
assert isinstance(mask, int), f"""
Invalid mask identifier given of type: {mask.__class__.__name__}"""
mask = y != mask
mask = np.array([all(i.reshape(-1,)) for i in mask])
x = x[mask]
prev_y = prev_y[mask]
y = y[mask]
return x, prev_y, y
def find_tot_plots(features, max_subplots):
tot_plots = np.linspace(0, features, int(features / max_subplots) + 1 if features % max_subplots == 0 else int(
features / max_subplots) + 2)
# converting each value to int because linspace can return array containing floats if features is odd
tot_plots = [int(i) for i in tot_plots]
return tot_plots
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if 'int' in obj.__class__.__name__:
return int(obj)
elif 'float' in obj.__class__.__name__:
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif 'bool' in obj.__class__.__name__:
return bool(obj)
elif callable(obj) and hasattr(obj, '__module__'):
if isinstance(obj, FunctionType):
return obj.__name__
else:
return obj.__module__
else:
return super(JsonEncoder, self).default(obj)
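# Usage sketch for ``JsonEncoder`` (illustrative only): numpy scalars and
# arrays are not JSON serializable by default, so they are routed through
# ``default`` above and converted to builtin types.
def _json_encoder_usage_sketch():
    import json
    import numpy as np
    config = {"lr": np.float32(0.001), "epochs": np.int64(10),
              "layers": np.array([32, 16, 1]), "use_bias": np.bool_(True)}
    return json.dumps(config, cls=JsonEncoder)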
def plot_activations_along_inputs(
data: np.ndarray,
activations: np.ndarray,
observations: np.ndarray,
predictions: np.ndarray,
in_cols: list,
out_cols: list,
lookback: int,
name: str,
path: str,
vmin=None,
vmax=None,
show=False
):
# activation must be of shape (num_examples, lookback, input_features)
assert activations.shape[1] == lookback
assert activations.shape[2] == len(in_cols), f'{activations.shape}, {len(in_cols)}'
# data is of shape (num_examples, input_features)
assert data.shape[1] == len(in_cols)
assert len(data) == len(activations)
for out in range(len(out_cols)):
pred = predictions[:, out]
if observations is None:
obs = None
else:
obs = observations[:, out]
out_name = out_cols[out]
for idx in range(len(in_cols)):
plt.close('all')
fig, (ax1, ax2, ax3) = plt.subplots(3, sharex='all')
fig.set_figheight(12)
ax1.plot(data[:, idx], label=in_cols[idx])
ax1.legend()
ax1.set_title('activations w.r.t ' + in_cols[idx])
ax1.set_ylabel(in_cols[idx])
ax2.plot(pred, label='Prediction')
if obs is not None:
ax2.plot(obs, '.', label='Observed')
ax2.legend()
ytick_labels = [f"t-{int(i)}" for i in np.linspace(lookback - 1, 0, lookback)]
im = imshow(
activations[:, :, idx].transpose(),
vmin=vmin,
vmax=vmax,
aspect="auto",
ax = ax3,
ax_kws=dict(xlabel="Examples",
ylabel="lookback steps"),
show=False,
yticklabels=ytick_labels
)
fig.colorbar(im, orientation='horizontal', pad=0.2)
plt.subplots_adjust(wspace=0.005, hspace=0.005)
_name = f'attn_weights_{out_name}_{name}_'
plt.savefig(os.path.join(path, _name) + in_cols[idx], dpi=400, bbox_inches='tight')
if show:
plt.show()
plt.close('all')
return
class DataNotFound(Exception):
def __init__(self, source):
self.source= source
def __str__(self):
return f"""
Unable to get {self.source} data.
You must specify the data either using 'x' or 'data' keywords."""
def print_something(something, prefix=''):
"""prints shape of some python object"""
if hasattr(something, "shape"):
print(f"{prefix} shape: ", something.shape)
elif isinstance(something, list):
print(f"{prefix} shape: ", [thing.shape for thing in something if hasattr(thing, "shape")])
elif isinstance(something, dict):
print(f"{prefix} shape: ")
pprint.pprint({k: v.shape for k, v in something.items() if hasattr(v, "shape")}, width=40)
else:
print(something)
def maybe_three_outputs(data, teacher_forcing=False):
"""num_outputs: how many outputs from data we want"""
if teacher_forcing:
num_outputs = 3
else:
num_outputs = 2
if num_outputs == 2:
if len(data) == 2:
return data[0], data[1]
elif len(data) == 3:
return data[0], data[2]
else:
if len(data)==3:
return [data[0], data[1]], data[2]
# DA, IA-LSTM models return [x,prevy],y even when teacher_forcing is on!
return data
def get_version_info(
**kwargs
) -> dict:
"""returns version information of all the packages which are
used by different modules of ai4water. """
import sys
from ai4water.backend import lightgbm, tcn, catboost, xgboost, easy_mpl, SeqMetrics
from ai4water.backend import tf, keras, torch
from ai4water.backend import np, pd, mpl
from ai4water.backend import h5py
from ai4water.backend import sklearn, shapefile, xr, netCDF4
from ai4water.backend import optuna, skopt, hyperopt, plotly
from ai4water.backend import fiona
from ai4water.backend import lime, sns
from ai4water import __version__
info = {'python': sys.version, 'os': os.name, 'ai4water': __version__}
if kwargs.get('tf', None):
tf = kwargs['tf']
info['tf_is_built_with_cuda'] = tf.test.is_built_with_cuda()
info['is_built_with_gpu_support'] = tf.test.is_built_with_gpu_support()
info['tf_is_gpu_available'] = tf.test.is_gpu_available()
info['eager_execution'] = tf.executing_eagerly()
for lib in [
lightgbm, tcn, catboost, xgboost, easy_mpl, SeqMetrics,
tf, keras, torch, np, pd, mpl, h5py, sklearn,
shapefile, fiona, xr, netCDF4,
optuna, skopt, hyperopt, plotly,
lime, sns]:
if lib is not None:
info[getattr(lib, '__name__')] = getattr(lib, '__version__', 'NotDefined')
return info
def check_attributes(model, attributes):
for method in attributes:
if not hasattr(model, method):
raise ValueError(f"your custom class does not have {method}")
def get_nrows_ncols(n_rows, n_subplots)->"tuple[int, int]":
if n_rows is None:
n_rows = int(np.sqrt(n_subplots))
n_cols = max(int(n_subplots / n_rows), 1) # ensure n_cols != 0
n_rows = int(n_subplots / n_cols)
while not ((n_subplots / n_cols).is_integer() and
(n_subplots / n_rows).is_integer()):
n_cols -= 1
n_rows = int(n_subplots / n_cols)
return n_rows, n_cols
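# Worked example for ``get_nrows_ncols`` (illustrative):
#   get_nrows_ncols(None, 10) -> (5, 2)   # near-square grid, 5 rows x 2 columns
#   get_nrows_ncols(2, 10)    -> (2, 5)   # an explicit n_rows is respected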
METRIC_TYPES = {
"r2": "max",
"nse": "max",
"r2_score": "max",
"kge": "max",
'log_nse': 'max',
"corr_coeff": "max",
'accuracy': "max",
'f1_score': 'max',
"mse": "min",
"rmse": "min",
"rmsle": "min",
"mape": "min",
"nrmse": "min",
"pbias": "min",
"bias": "min",
"med_seq_error": "min",
}
class AttribtueSetter(object):
def __init__(self, obj, y: np.ndarray, from_fit=None):
if obj.mode is None:
if 'float' in y.dtype.name:
obj.mode = "regression"
            else:
                obj.mode = "classification"
            warnings.warn(f"inferred mode is {obj.mode}. Ignore this message if the inferred mode is correct.")
self.mode = obj.mode
obj.classes_ = self.classes(y) # for sklearn
obj.num_classes_ = len(obj.classes_)
obj.is_binary_ = self.is_binary(y)
outs = getattr(obj, 'output_features', '') or ''
obj.is_multiclass_ = self.is_multiclass(y, outs)
obj.is_multilabel_ = self.is_multilabel(outs)
obj.is_fitted_ = from_fit
return
def is_multiclass(self, y, output_features='') -> bool:
"""Returns True if the porblem is multiclass classification"""
_default = False
if self.mode == 'classification':
            if len(output_features) <= 1:  # also consider 0 because when output_features is None/'', its length will be 0
if len(self.classes(y)) > 2:
_default = True
# elif len(y) == y.size: # this means the names of
# pass
else:
pass # todo, check when output columns are one-hot encoded
return _default
def is_multilabel(self,
output_features='',
):
if self.mode == "classification":
if len(output_features) > 1:
return True
return False
def classes(self, y: np.ndarray):
if self.mode == "regression":
return []
if len(y) != y.size:
# nd array, one hot encoded
return [i for i in range(y.shape[-1])]
return list(np.unique(y[~np.isnan(y)]))
def is_binary(self, y):
if self.mode == "regression":
return False
if len(y) != y.size: # nd array, may be one hot encoded
if y.shape[-1] == 2 and len(np.unique(y[~np.isnan(y)])) == 2:
return True # binary, one hot encoded
return False
if len(np.unique(y[~np.isnan(y)])) == 2:
return True
return False
def get_values(outputs):
if isinstance(outputs, (dict, list)) and len(outputs) == 1:
outputs = list(outputs.values())[0]
return outputs
def create_subplots(*args, **kwargs):
try:
from pandas.plotting._matplotlib.tools import create_subplots
except ImportError: # for older pandas versions
from pandas.plotting._matplotlib.tools import _subplots as create_subplots
return create_subplots(*args, **kwargs)
def mad(*args, **kwargs):
try:
from scipy.stats import median_abs_deviation as _mad
except ImportError:
from scipy.stats import median_absolute_deviation as _mad
return _mad(*args, **kwargs) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/utils/utils.py | utils.py |
from ai4water.backend import tf, np
if tf is not None:
K = tf.keras.backend
else:
K = None
def reset_graph(seed=313):
tf.compat.v1.reset_default_graph()
tf.compat.v1.set_random_seed(seed) # tf.random.set_seed(seed) #
np.random.seed(seed)
def tf_nse(true, _pred, name='NSE'):
""" Nash-Sutcliff efficiency to be used as loss function. It is subtracted from one before being returned"""
neum = tf.reduce_sum(tf.square(tf.subtract(_pred, true)))
denom = tf.reduce_sum(tf.square(tf.subtract(true, tf.math.reduce_mean(true))))
const = tf.constant(1.0, dtype=tf.float32)
_nse = tf.subtract(const, tf.math.divide(neum, denom), name=name)
return tf.subtract(const, _nse, name=name + '_LOSS')
def corr_coeff(true, predicted):
""" Pearson correlation coefficient
https://stackoverflow.com/a/58890795/5982232
"""
mx = tf.math.reduce_mean(true)
my = tf.math.reduce_mean(predicted)
xm, ym = true - mx, predicted - my
r_num = tf.math.reduce_mean(tf.multiply(xm, ym))
r_den = tf.math.reduce_std(xm) * tf.math.reduce_std(ym)
return r_num / r_den
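# Usage sketch (an assumption for illustration, not part of the original
# module): the functions above can be passed to Keras directly as a custom
# loss / metric. The tiny model below is arbitrary.
def _tf_nse_usage_sketch():
    from tensorflow import keras
    model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer="adam", loss=tf_nse, metrics=[corr_coeff])
    return model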
def tf_kge(true, predicted):
""" Kling Gupta efficiency. It is not being subtracted from 1.0 so that it can be used as loss"""
tf_cc = corr_coeff(true, predicted)
tf_alpha = tf.math.reduce_std(predicted) / tf.math.reduce_std(true)
tf_beta = K.sum(predicted) / K.sum(true)
return K.sqrt(K.square(tf_cc - 1.0) + K.square(tf_alpha - 1.0) + K.square(tf_beta - 1.0))
def tf_r2(true, predicted):
"""
https://www.kaggle.com/c/mercedes-benz-greener-manufacturing/discussion/34019
it is like r2_square score of sklearn which can be negative
Not being subtracted from 1.0
"""
r = corr_coeff(true, predicted)
return r ** 2
def tf_r2_mod(true, predicted):
"""
https://www.kaggle.com/c/mercedes-benz-greener-manufacturing/discussion/34019
it is like r2_square score of sklearn which can be negative
Not being subtracted from 1.0
"""
ss_res = K.sum(K.square(true - predicted))
ss_tot = K.sum(K.square(true - K.mean(true)))
return ss_res / (ss_tot + K.epsilon())
def tf_nse_beta(true, predicted, name='nse_beta'):
"""
    Beta decomposition of NSE. See Gupta et al. 2009;
    used in Kratzert et al., 2018.
"""
const = tf.constant(1.0, dtype=tf.float32)
nse_beta = (K.mean(predicted) - K.mean(true)) / K.std(true)
return tf.subtract(const, nse_beta, name=name + '_LOSS')
def tf_nse_alpha(true, predicted, name='nse_alpha'):
"""
    Alpha decomposition of NSE. See Gupta et al. 2009;
    used in Kratzert et al., 2018.
    It is being subtracted from 1.0
"""
const = tf.constant(1.0, dtype=tf.float32)
nse_alpha = K.std(predicted) / K.std(true)
return tf.subtract(const, nse_alpha, name=name + '_LOSS')
def pbias(true, predicted):
_sum = K.sum(tf.subtract(predicted, true))
_a = tf.divide(_sum, K.sum(true))
return 100.0 * _a
def nse(true, _pred, name='NSE'):
"""Nash-Sutcliff efficiency to be used as loss function. It is subtracted from one before being returned"""
neum = tf.reduce_sum(tf.square(tf.subtract(_pred, true)))
denom = tf.reduce_sum(tf.square(tf.subtract(true, tf.math.reduce_mean(true))))
const = tf.constant(1.0, dtype=tf.float32)
_nse = tf.subtract(const, tf.math.divide(neum, denom), name=name)
return 1.0 - tf.subtract(const, _nse, name=name + '_LOSS')
def kge(true, predicted):
""" Kling Gupta efficiency. It is not being subtracted from 1.0 so that it can be used as loss"""
tf_cc = corr_coeff(true, predicted)
tf_alpha = tf.math.reduce_std(predicted) / tf.math.reduce_std(true)
tf_beta = K.sum(predicted) / K.sum(true)
return 1.0 - K.sqrt(K.square(tf_cc - 1.0) + K.square(tf_alpha - 1.0) + K.square(tf_beta - 1.0)) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/utils/tf_losses.py | tf_losses.py |
import os
import sys
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from datetime import datetime
class Dataset():
"""
A class that handles datasets for the methods in this python package
Attributes:
dataframe - (pandas.DataFrame) Dataframe containing dataset
        feature_names  - (list) List of feature names
        features       - (numpy.ndarray) Values of features in dataset
        labels         - (numpy.ndarray) Values of real labels for dataset
        instance_names - (list) List of indices for dataset
pred_name - (str) Name of column containing predictions (optional)
predictions - (numpy.ndarray) Array containing predictions
from a classifier for dataset (optional)
cat_features - (list) List of features that are categorical (optional)
train_features - (list) List of features for training (optional)
model - (tensorflow model) A model trained using
the dataset (optional)
title - (str) Title of dataset (optional)
protected_attribute_names - (list) List of features that should
be protected
protected_attributes - (numpy.ndarray) Array containing all
values of protected attributes
instance_weights - (numpy.ndarray) Array containing
weights for each row (optional)
labels_binary - (numpy.ndarray) Labels cast to
zeros and ones
Functions:
__init__ - Initiates an instance of a Dataset
get_statistics - Updates statistic attributes in case of
change to dataset
"""
def __init__(self, df, label_names, protected_attribute_names,
map_func_pa=None, map_func_lab=None, title=None,
weights = None, pred_name = None, predictions = None,
map_func_pred=None, categorical_features=None,
model = None, training_features=None,
alter_dataframe=True):
"""
Arguments:
df - Pandas dataframe containing features, labels
and protected attributes. All data should be
numerical (NAs not allowed).
label_names - (list) Names of the labels of the data
map_func_pa - Function that maps protected attributes
to 0 or 1
default: None
map_func_lab - Function that maps labels to 0 or 1
default: None
title - (str) Name of dataset
default: None
weights - (numpy.ndarray) Weights of each row
default: None
pred_name - (str) Column containing predictions from
a classifier for dataset
default: None
predictions - (numpy.ndarray) Array containing predictions
from a classifier on dataset
default: None
map_func_pred - Function that maps predictions to 0 or 1
default: None
model - A model trained using the dataset
default: None
alter_dataframe - (boolean) Set to False if dataframe passed
to instance is already in the correct
format (mainly used by internal functions
within the package)
default: True
            protected_attribute_names - (list) Names of the protected attributes
training_features - (list) List of features for training
default: None
categorical_features - (list) List of features that are
categorical
default: None
Raises:
- TypeError: Data must be a pandas dataframe
- TypeError: Certain fields must be np.ndarrays
- ValueError: np.ndarray shapes must match
"""
        if df is None:
            raise TypeError("Data not presented. Must provide a pandas "
                            "DataFrame with features, labels and a "
                            "protected attribute.")
        if not isinstance(df, pd.DataFrame):
            raise TypeError("Data must be provided as a pandas dataframe")
if df.isna().any().any():
raise ValueError("DataFrame cannot contain any NA values.")
self.dataframe = df
self.feature_names = [n for n in df.columns if
n not in label_names]
self.label_names = label_names
self.features = df[self.feature_names].values.copy()
self.labels = df[self.label_names].values.copy()
self.instance_names = df.index.astype(str).tolist()
self.pred_name = pred_name
self.predictions = predictions
self.cat_features = categorical_features
self.train_features = training_features
self.model = model
self.protected_attribute_names = protected_attribute_names
self.protected_attributes = (df.loc[:,protected_attribute_names]
.values.copy())
if not categorical_features is None:
LE = LabelEncoder()
for column in categorical_features:
self.dataframe[column] = LE.fit_transform(
self.dataframe[column])
if self.pred_name:
if map_func_pred:
self.dataframe['Predictions']=map_func_pred(
self.dataframe[pred_name].values.copy()
)
else:
self.dataframe['Predictions'] = (
self.dataframe[pred_name].values.copy()
)
elif not self.predictions is None:
self.dataframe['Prediction'] = self.predictions
self.dataframe.loc[self.dataframe['Prediction']>0.5,
'Prediction_binary'] = 1
self.dataframe.loc[self.dataframe['Prediction']<=0.5,
'Prediction_binary'] = 0
if weights is None:
self.instance_weights = np.ones_like(self.instance_names,
dtype=np.float64)
else:
self.instance_weights = weights
if alter_dataframe:
# Map protected attributes and labels to 0//1
if map_func_pa:
self.protected_attributes_binary = map_func_pa(
self.protected_attributes
)
else:
self.protected_attributes_binary = self.protected_attributes
if map_func_lab:
self.labels_binary = map_func_lab(self.labels)
else:
self.labels_binary = self.labels
self.dataframe['Protected'] = self.protected_attributes_binary
self.dataframe['Label_binary'] = self.labels_binary
self.dataframe['Weight'] = self.instance_weights
# Set dataset title
if title:
self.title = title
else:
self.title = 'Dataset_{}'.format(datetime.now()
.strftime('%y_%m_%d_%H:%M:%S'))
# Basic statistics
self.get_statistics()
def get_statistics(self,reference='label'):
"""
updates statistic attributes in case of change to dataset
Args:
reference - (str) Should be 'label' or 'prediction'
depending on which kind of statistics
are desirable
default: 'label'
Raises:
ValueError: Reference must be either 'label' or 'prediction'
"""
if reference == 'label':
reference = 'Label_binary'
elif reference == 'prediction':
reference = 'Prediction_binary'
else:
raise ValueError("Reference must be either 'label' or 'prediction'")
df = self.dataframe
num_prot = len(df[df['Protected'] == 1])
num_unprot = len(df[df['Protected'] == 0])
pos_prot = sum(df[(df['Protected']==1)&
(df[reference]==1)]['Weight'])
pos_unprot = sum(df[(df['Protected']==0)&
(df[reference]==1)]['Weight'])
pr_prot = pos_prot / num_prot
pr_unprot = pos_unprot / num_unprot
self.Statistics = {
'Protected': {
'Number' : num_prot,
'Positive' : pos_prot,
'Percentage' : pr_prot
},
'Unprotected': {
'Number' : num_unprot,
'Positive' : pos_unprot,
'Percentage' : pr_unprot
}
} | AIBias-Oddgeir | /AIBias-Oddgeir-0.1.0.tar.gz/AIBias-Oddgeir-0.1.0/aibias/dataset.py | dataset.py |
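# Minimal construction sketch for the Dataset class above (the toy hiring
# data, column names and title are assumptions purely for illustration).
def _dataset_usage_sketch():
    import pandas as pd
    from aibias.dataset import Dataset
    df = pd.DataFrame({
        "age":    [25, 32, 47, 51, 38, 29],
        "gender": [0, 1, 0, 1, 1, 0],    # protected attribute, already 0/1
        "hired":  [0, 1, 1, 1, 0, 1],    # binary label
    })
    return Dataset(df,
                   label_names=["hired"],
                   protected_attribute_names=["gender"],
                   training_features=["age"],
                   title="Toy hiring data")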
import numpy as np
import aibias.dataset as ds
#===================================================
# DISPARATE IMPACT
#===================================================
def DisparateImpact(dataset,reference='label'):
"""
The ratio in probability of favorable outcomes between unprivileged
and privileged groups.
    DI = Pr(C = YES | X = 1) / Pr(C = YES | X = 0), where X = 1 denotes the protected group
Arguments:
dataset - A dataset object
Returns:
DI - (float) Disparate Impact of dataset
Error:
TypeError - If the given dataset is not of type
aibias.dataset.Dataset
"""
if not isinstance(dataset,ds.Dataset):
raise TypeError("Dataset must be of type aibias.dataset.Dataset")
dataset.get_statistics(reference)
pr_prot = dataset.Statistics['Protected']['Percentage']
pr_unprot = dataset.Statistics['Unprotected']['Percentage']
return pr_prot / pr_unprot
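# Sketch of calling DisparateImpact on a toy Dataset (the data frame below is
# an assumption for illustration; with these labels DI = 1.0/0.5 = 2.0).
def _disparate_impact_sketch():
    import pandas as pd
    df = pd.DataFrame({"score": [1, 2, 3, 4],
                       "prot":  [0, 0, 1, 1],
                       "label": [1, 0, 1, 1]})
    dset = ds.Dataset(df, label_names=["label"],
                      protected_attribute_names=["prot"])
    return DisparateImpact(dset)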
#===================================================
# STATISTICAL PARITY DIFFERENCE
#===================================================
def StatisticalParityDifference(dataset,reference='label'):
"""
The difference in the probability of favorable outcomes between the
unprivileged and privileged groups
SPD = Pr(C = YES | X = 1) - Pr(C = YES | x = 0)
Arguments:
dataset - A dataset object
Returns:
SPD - (float) Statistical Parity Difference of dataset
Error:
TypeError - If the given dataset is not of type
aibias.dataset.Dataset
"""
if not isinstance(dataset,ds.Dataset):
raise TypeError("Dataset must be of type aibias.dataset.Dataset")
dataset.get_statistics(reference)
pr_prot = dataset.Statistics['Protected']['Percentage']
pr_unprot = dataset.Statistics['Unprotected']['Percentage']
return pr_prot - pr_unprot
#===================================================
# AVERAGE ODDS DIFFERENCE
#===================================================
def AverageOddsDifference(dataset):
"""
    The average of the difference in false positive rates and true positive
    rates between unprivileged and privileged groups
AOD = AVG(TPR,FPR | X = 1) - AVG(TPR,FPR | X = 0)
    Arguments:
        dataset     - A dataset object that already contains predictions
    Returns:
        AOD     - (float) Average Odds Difference of dataset
    Error:
        TypeError   - If the given dataset is not of type
                      aibias.dataset.Dataset
        ValueError  - If the dataset does not contain predictions
    """
dataset.get_statistics()
if not isinstance(dataset,ds.Dataset):
raise TypeError("Dataset must be of type aibias.dataset.Dataset")
df = dataset.dataframe
if not 'Prediction' in df.columns:
raise ValueError("No predictions included in dataset")
tp_prot = df[ (df['Protected']==1)&(df['Label_binary']==1)&
(df['Prediction_binary']==1)]
fp_prot = df[ (df['Protected']==1)&(df['Label_binary']==0)&
(df['Prediction_binary']==1)]
tp_unprot = df[ (df['Protected']==0)&(df['Label_binary']==1)&
(df['Prediction_binary']==1)]
fp_unprot = df[ (df['Protected']==0)&(df['Label_binary']==0)&
(df['Prediction_binary']==1)]
    pos_prot = dataset.Statistics['Protected']['Positive']
    neg_prot = dataset.Statistics['Protected']['Number'] - pos_prot
    pos_unprot = dataset.Statistics['Unprotected']['Positive']
    neg_unprot = dataset.Statistics['Unprotected']['Number'] - pos_unprot
    tpr_prot = len(tp_prot) / pos_prot
    fpr_prot = len(fp_prot) / neg_prot        # false positive rate w.r.t. negatives
    tpr_unprot = len(tp_unprot) / pos_unprot
    fpr_unprot = len(fp_unprot) / neg_unprot
avg_prot = (tpr_prot + fpr_prot) / 2
avg_unprot = (tpr_unprot + fpr_unprot) / 2
return avg_prot - avg_unprot
#===================================================
# EQUAL OPPORTUNITY DIFFERENCE
#===================================================
def EqualOpportunityDifference(dataset):
"""
The difference in true positive rates between unprivileged and
privileged groups
EOD = {TPR | Protected} - {TPR | Unprotected}
    Arguments:
        dataset     - A dataset object that already contains predictions
    Returns:
        EOD     - (float) Equal Opportunity Difference of dataset
    Error:
        TypeError   - If the given dataset is not of type
                      aibias.dataset.Dataset
        ValueError  - If the dataset does not contain predictions
    """
dataset.get_statistics()
if not isinstance(dataset,ds.Dataset):
raise TypeError("Dataset must be of type aibias.dataset.Dataset")
df = dataset.dataframe
if not 'Prediction' in df.columns:
raise ValueError("No predictions included in dataset")
tp_prot = df[ (df['Protected']==1)&(df['Label_binary']==1)&
(df['Prediction_binary']==1)]
tp_unprot = df[ (df['Protected']==0)&(df['Label_binary']==1)&
(df['Prediction_binary']==1)]
tpr_prot = len(tp_prot) / dataset.Statistics['Protected']['Positive']
tpr_unprot = len(tp_unprot) / dataset.Statistics['Unprotected']['Positive']
return tpr_prot - tpr_unprot | AIBias-Oddgeir | /AIBias-Oddgeir-0.1.0.tar.gz/AIBias-Oddgeir-0.1.0/aibias/metrics.py | metrics.py |
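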
import os
import sys
import numpy as np
import pandas as pd
from datetime import datetime
import aibias.dataset as ds
import aibias.metrics as met
import matplotlib.pyplot as plt
class Visualization():
"""
A class that handles visualizations of metrics for datasets
Attributes:
        datasets        - (list) List of datasets to be visualized
metrics - (list) List of metrics implemented
metrics_short - (list) List of short hand names of metrics
X - (int) Size of x-axis of figure
Y - (int) Size of y-axis of figure
save - (boolean) To save the figures
        reference       - (str) 'label' or 'prediction', for determining
which to use in Disparate Impact and Statistical
Parity Difference
Functions:
visualize_metric - Generates a figure with visualization of the
chosen metric for given datasets
visualize_metrics - Generates figures for each metric implemented
for given datasets
"""
def __init__(self, datasets, figsize_X=5, figsize_Y=5,
save=False,reference='label'):
"""
Arguments:
dataset - (list) List of datasets to be visualized
figsize_X - (int) Size of the x-axis of figure
default: 5
figsize_Y - (int) Size of the y-axis of figure
default: 5
            save        - (boolean) To save the figure
                          default: False
            reference   - (str) 'label' or 'prediction', for determining
                          which to use in Disparate Impact and Statistical
                          Parity Difference
                          default: 'label'
        Raises:
TypeError: Datasets must be of type aibias.dataset.Dataset
"""
if isinstance(datasets,list):
self.datasets = datasets
else:
self.datasets = [datasets]
        for dataset in self.datasets:
if not isinstance(dataset,ds.Dataset):
raise TypeError("Datasets must be of type aibias.dataset.Dataset")
self.metrics = [
'DisparateImpact',
'StatisticalParityDifference',
'AverageOddsDifference',
'EqualOpportunityDifference'
]
self.metrics_short = ['di','spd','aod','eod']
self.X = figsize_X
self.Y = figsize_Y
self.save = save
self.reference = reference
def visualize_metric(self,metric,rotation=90,annotation=True,
references = None, **kwargs):
"""
Generates a figure with visualization of the chosen metric for
given datasets
Arguments:
metric - (str) Metric to visualize
rotation - (int) Rotation of bar labels
default: 90
            annotation  - (boolean) To annotate bars with values
default: True
references - (list) List of references ('label' or
'prediction') for when different datasets
use different references
default: None
kwargs:
All kwargs that could be used with pyplot.bar
Raises:
ValueError: metric must be one of the implemented metrics
ValueError: Dataset must contain predictions
- For AverageOddsDifference and
EqualOpportunityDifference
"""
if (not metric in self.metrics and
not metric.lower() in self.metrics_short):
raise ValueError("`metric` must be one of the following: " +
"{0[0]} (di), {0[1]} (spd),{0[2]} (aod), {0[3]} (eod)"
.format(self.metrics))
# Disparate Impact
if metric.lower() == 'di' or metric == self.metrics[0]:
values = list()
titles = list()
ylable = self.metrics[0]
for i,ds in enumerate(self.datasets):
if not references is None:
di = met.DisparateImpact(ds,references[i])
else:
di = met.DisparateImpact(ds,self.reference)
values.append(di)
titles.append(ds.title)
# Statistical Parity Difference
elif metric.lower() == 'spd' or metric == self.metrics[1]:
values = list()
titles = list()
ylable = self.metrics[1]
for i,ds in enumerate(self.datasets):
if not references is None:
spd = met.StatisticalParityDifference(ds,references[i])
else:
spd = met.StatisticalParityDifference(ds,self.reference)
values.append(spd)
titles.append(ds.title)
# Average Odds Difference
elif metric.lower() == 'aod' or metric == self.metrics[2]:
values = list()
titles = list()
ylable = self.metrics[2]
for ds in self.datasets:
if not 'Prediction' in ds.dataframe.columns:
raise ValueError("No predictions included in dataset")
aod = met.AverageOddsDifference(ds)
values.append(aod)
titles.append(ds.title)
        # Equal Opportunity Difference
elif metric.lower() == 'eod' or metric == self.metrics[3]:
values = list()
titles = list()
ylable = self.metrics[3]
for ds in self.datasets:
if not 'Prediction' in ds.dataframe.columns:
raise ValueError("No predictions included in dataset")
eod = met.EqualOpportunityDifference(ds)
values.append(eod)
titles.append(ds.title)
# Create figure
fig = plt.figure(figsize=(self.X,self.Y))
plt.gca().yaxis.grid()
plt.gca().set_axisbelow(True)
# Create bars
kwargs.setdefault('color','cyan')
kwargs.setdefault('edgecolor','black')
kwargs.setdefault('alpha',0.5)
plt.bar(titles,values,**kwargs)
# Set fairness line
if metric.lower() == 'di' or metric == self.metrics[0]:
fair = 1
else:
fair = 0
num_ds = len(self.datasets)
plt.text(num_ds+0.5,fair,'Fair',fontsize=14)
plt.hlines(fair,-1,num_ds,label='Fair',linewidth=2)
# Set labels
plt.ylabel(ylable)
plt.xlabel('Datasets')
plt.setp(plt.gca().get_xticklabels(), rotation=rotation,
horizontalalignment='right')
# Annotate bars
if annotation:
for i, val in enumerate(values):
y_val = max(0,val) + 0.05
val = np.round(val,decimals=4)
if fair == 1 and abs(y_val-1) < 0.1:
y_val = 1.05
plt.annotate(f'{val}\n',xy=(titles[i],y_val),
ha='center',va='center')
# Set y axis range
y_max = max(values+[0]) if fair == 0 else max(values+[1])
y_min = min(values+[0])
plt.ylim(y_min-.5,y_max+.5)
# Save figure
if self.save:
plt.savefig('Dataset_graph_{}'.format(datetime.now()
                .strftime('%y_%m_%d_%H:%M:%S')))
plt.show()
def visualize_metrics(self, rotation=90,
references = None, **kwargs):
"""
Generates figures for each metric implemented for given datasets
Arguments:
rotation - (int) Rotation of bar labels
default: 90
references - (list) List of references ('label' or
'prediction') for when different datasets
use different references
default: None
kwargs:
All kwargs that could be used with pyplot.bar
"""
for metric in self.metrics:
self.visualize_metric(metric,rotation,
references=references, **kwargs) | AIBias-Oddgeir | /AIBias-Oddgeir-0.1.0.tar.gz/AIBias-Oddgeir-0.1.0/aibias/visualization.py | visualization.py |
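# Usage sketch for the Visualization class above; ``datasets`` is expected to
# be one or more aibias Dataset objects (e.g. built as in the dataset.py
# sketch), and the figure settings below are arbitrary assumptions.
def _visualization_usage_sketch(datasets):
    from aibias.visualization import Visualization
    viz = Visualization(datasets, figsize_X=6, figsize_Y=4, save=False)
    viz.visualize_metric("di", rotation=45)   # a single metric by short name
    viz.visualize_metrics(rotation=45)        # all metrics (AOD/EOD need datasets with predictions)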
import sys
import numpy as np
import pandas as pd
import tqdm
import tensorflow_addons as tfa
import aibias.dataset as ds
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
#======================================
# PREJUDICE REMOVER
#======================================
class PR_remover():
"""
Creates and trains a linear regression model that utilizes
the 'Prejudice Remover' regularizer as is described in the paper
'Fairness-aware Classifier with Prejudice Remover Regularizer'
Toshihiro Kamishima et al. (2012)
Attributes:
dataset - (Dataset) Dataset object
epochs - (int) Number of epochs in training
eta - (float) Scalar for the PR regularizer
ntrain - (int) Number of samples chosen from each real label
num_samples - (int) Number of samples in dataset
num_f - (int) Number of training features in dataset
Functions:
process_data - Processes the data from the Dataset object so
that it is compatible with the training model
fit - Train the model using the PR regularizer
transform - Generate predictions for dataset and return
a new dataset object with the model and predictions
"""
def __init__(self,dataset,epochs=1,eta=0.5,ntrain=1000):
"""
Arguments:
dataset - (aibias.dataset.Dataset) Object containing the
dataset to be trained
epochs - (int) Number of epochs to train the model
Default: 1
eta - (float/int) Scalar for the PR regularizer
default: 0.5
ntrain - (int) Number of samples chosen from each real label
default: 1000
"""
self.dataset = dataset
self.epochs = epochs
self.eta = eta
self.ntrain = ntrain
self.num_samples = len(dataset.dataframe)
self.num_f = len(self.dataset.train_features)
self.process_data()
def process_data(self):
"""
Processes the data from the Dataset object so that it
is compatible with the training model.
"""
if not self.dataset.train_features:
raise ValueError("Dataset must contain designated training features")
self.df = self.dataset.dataframe.copy()
self.X = self.df[self.dataset.train_features]
self.Y = self.df['Label_binary']
prot_idx = self.df[self.df['Label_binary']==1].index.values
unprot_idx = self.df[self.df['Label_binary']==0].index.values
prot_row = np.random.choice(prot_idx,self.ntrain)
unprot_row = np.random.choice(unprot_idx,self.ntrain)
rows = np.concatenate((prot_row, unprot_row))
self.samples = self.df.loc[rows]
X_val = self.samples[self.dataset.train_features].values.copy()
Y_val = self.samples['Label_binary'].values.copy()
LE = LabelEncoder()
Y_val = LE.fit_transform(Y_val)
scaler = StandardScaler()
X_val = scaler.fit_transform(X_val)
is_numeric = self.X.apply(lambda s: pd
.to_numeric(s, errors='coerce')
.notnull().all()
)
if any(res == False for res in is_numeric):
raise ValueError("Training features must be numeric")
self.X_val = np.array(X_val,dtype='float32')
self.Y_val = np.array(Y_val,dtype='float32')
self.num_p_u = [
self.dataset.Statistics['Unprotected']['Number'],
self.dataset.Statistics['Protected']['Number']
]
self.S = self.samples['Protected'].values.copy().astype(int)
def PR_regularizer(self,weights):
"""
Prejudice remover regularizer. Punishes model for
relying too much on sensitive variables.
Arguments:
weights - (tensor) Weights of the model
returns:
PR regularization along with l2 regularization
"""
def sigmoid(x,w):
dot = np.dot(x,w)
return 1 / (1 + np.exp(-dot))
weights = weights.numpy().reshape([2,self.num_f])
# p = Pr[y|x,s] = sigmoid(w(s)^T,x)
# p = np.ndarray, len: num_f
p = np.array([sigmoid(self.X_val[i,:],weights[self.S[i],:])
for i in range(len(self.X_val))])
        # q = Pr[y|s] = \sum_{(xi,si) in D s.t. si=s} sigma(xi,si) / |D[s]|
# q = np.ndarray, len: 2
q = np.array([np.sum(p[self.S == si])
for si in [0,1]]) / self.num_p_u
# r = Pr[y] = \sum_{(xi.si) in D} sigma(xi,si) / |D|
# r = numpy.float64
r = np.sum(p) / len(self.X_val)
        # f = \sum_{(x,s) in D}
        #         sigma(x,s) * [log(q(s)) - log(r)] +
        #         (1 - sigma(x,s)) * [log(1-q(s)) - log(1-r)]
        # f = numpy.float64
f = np.sum(p * (np.log(q[self.S]) - np.log(r))
+ (1.0-p) * (np.log(1.0-q[self.S])
- np.log(1.0-r))
)
# l2reg = numpy.float64
l2_reg = np.sum(0.5*np.square(weights))
return self.eta*f + l2_reg
def fit(self):
"""
Train the model using the PR regularizer
Returns:
History of the training session
"""
tqdm_callback = tfa.callbacks.TQDMProgressBar()
def build_model():
model = Sequential()
model.add(Dense(60,activation='relu'))
model.add(Dense(60,activation='relu'))
model.add(Dense(self.num_f,activation='relu'))
model.add(Dense(2,
activation='relu',
kernel_regularizer=self.PR_regularizer
))
model.add(Dense(1,activation= 'sigmoid'))
model.compile(optimizer = 'adam',
loss = 'binary_crossentropy',
metrics = ['accuracy'],
run_eagerly = True)
return model
self.model = build_model()
history = self.model.fit(self.X_val, self.Y_val,
batch_size = 32,
callbacks = [tqdm_callback],
epochs = self.epochs)
return history
def transform(self):
"""
Generate predictions for dataset and return a new dataset
object with the model and predictions
Returns:
A new dataset object with predictions and pre-trained model
Raises:
AttributeError - Transform should not be called before fit
"""
if not hasattr(self,'model'):
raise AttributeError("Transform should not be called before fit")
scalar = StandardScaler()
X_val = self.X.values.copy()
X_val = scalar.fit_transform(X_val)
predictions = self.model.predict(X_val)
transformed_dataset = ds.Dataset(
self.df,
label_names = self.dataset.label_names,
protected_attribute_names=self.dataset.protected_attribute_names,
title = self.dataset.title + ' (PrejudiceRemover)',
predictions = predictions,
categorical_features = self.dataset.cat_features,
training_features = self.dataset.train_features,
model = self.model,
alter_dataframe = False)
return transformed_dataset
def PrejudiceRemover(dataset,epochs,eta=5,ntrain=1000):
"""
Creates a PR_remover object that handles creation and training
of a binary classifier (logistic regression) model using the
PR regularizer.
Arguments:
dataset - (aibias.dataset.Dataset) Object containing the
dataset to be trained
        epochs  - (int) Number of epochs to train the model
        eta     - (float) Scalar for the PR regularizer
                  default: 5 (the underlying PR_remover class defaults to 0.5)
ntrain - (int) Number of samples chosen from each real label
default: 1000
Returns:
New dataset object containing the trained model along with
predictions for each row
Raises:
TypeError: Dataset must be of type aibias.dataset.Dataset
"""
if not isinstance(dataset,ds.Dataset):
raise TypeError("Dataset must be of type aibias.dataset.Dataset")
print('Preparing data')
pr_remover = PR_remover(dataset,epochs,eta,ntrain)
print('Starting training session')
pr_remover.fit()
return pr_remover.transform() | AIBias-Oddgeir | /AIBias-Oddgeir-0.1.0.tar.gz/AIBias-Oddgeir-0.1.0/aibias/algorithms/in_processing.py | in_processing.py |
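# Usage sketch for PrejudiceRemover above: the input Dataset must carry
# numeric training_features (see the dataset.py sketch); the epochs/eta/ntrain
# values here are arbitrary assumptions.
def _prejudice_remover_usage_sketch(dset):
    from aibias.algorithms.in_processing import PrejudiceRemover
    transformed = PrejudiceRemover(dset, epochs=5, eta=1.0, ntrain=500)
    # ``transformed`` is a new Dataset carrying the trained Keras model and a
    # prediction for every row of the original data.
    return transformed.predictions, transformed.model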
import os
import sys
import numpy as np
import pandas as pd
import aibias.dataset as ds
#==================================================
# REWEIGHING
#==================================================
def Reweigh(dataset):
"""
    Adjusts the weights of each 'individual' such that those within the
    protected group with a positive label and those outside it with a
    negative label are weighed higher, while those within the protected
    group with a negative label and those outside it with a positive label
    are weighed lower.
Arguments:
dataset - (aibias.dataset.Dataset) A dataset object
Returns:
A new dataset object with new weights for each row
Raises:
TypeError - If the given dataset is not of type
aibias.dataset.Dataset
"""
if not isinstance(dataset,ds.Dataset):
raise TypeError("Dataset must be of type aibias.dataset.Dataset")
df = dataset.dataframe.copy()
num_pos = len(df[df['Label_binary']==1])
num_neg = len(df) - num_pos
num_tot = len(df)
pr_pos = num_pos / num_tot
pr_neg = num_neg / num_tot
pre_computed = {
0: pr_neg,
1: pr_pos
}
weights = {
0: dict(),
1: dict()
}
    for prot in [0,1]: # Protected vs Unprotected
        for lab in [0,1]: # Favorable label vs Unfavorable
# Compute expected probability
num_cur = len(df[df['Protected'] == prot])
pr_cur = num_cur / num_tot
pr_lab = pre_computed[lab]
pr_exp_cur = pr_lab * pr_cur
# Compute observed probability
num_obs_cur = len(df[(df['Protected']==prot)
&(df['Label_binary']==lab)])
pr_obs_cur = num_obs_cur / num_tot
# Compute weight
weight = pr_exp_cur / pr_obs_cur
weights[prot][lab] = weight
weights[prot]['pr_exp_cur || {}'.format(lab)] = pr_exp_cur
weights[prot]['pr_obs_cur || {}'.format(lab)] = pr_obs_cur
# Update weights
for prot in [0,1]:
for lab in [0,1]:
df.loc[(df['Protected']==prot)
&(df['Label_binary']==lab),
'Weight'] = weights[prot][lab]
# Create new dataset object with the transformed dataset
transformed_dataset = ds.Dataset(
df,
label_names = dataset.label_names,
protected_attribute_names = dataset.protected_attribute_names,
title = dataset.title + ' (Reweighed)',
weights = df['Weight'].values.copy(),
training_features = dataset.train_features,
categorical_features = dataset.cat_features,
alter_dataframe = False)
return transformed_dataset | AIBias-Oddgeir | /AIBias-Oddgeir-0.1.0.tar.gz/AIBias-Oddgeir-0.1.0/aibias/algorithms/pre_processing.py | pre_processing.py |
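# Usage sketch for Reweigh above: given any aibias Dataset (e.g. the toy one
# from the dataset.py sketch), the returned copy carries per-row weights that
# balance the protected/label combinations.
def _reweigh_usage_sketch(dset):
    from aibias.algorithms.pre_processing import Reweigh
    reweighed = Reweigh(dset)
    return reweighed.dataframe[["Protected", "Label_binary", "Weight"]]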
import os
import numpy as np
import scipy.constants
from scipy.integrate import quad
from aicon.tools import Get_GVD, calc_MFPS
import pymatgen as pmg
import pandas as pd
Planck = scipy.constants.hbar
Boltzm = scipy.constants.Boltzmann
atommass = scipy.constants.atomic_mass
def Get_Phonon(filepath, Temp, ifscale):
'''For lattice thermal conductivity calculation '''
kappa = Phonon(filepath)
kappa.Get_Kappa(filepath, Temp, ifscale=ifscale)
kappa.Output(Temp)
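# Usage sketch for Get_Phonon (the directory name and temperatures are
# assumptions): the folder must contain POSCAR, band.yaml and gruneisen.yaml,
# and a trailing slash is required because paths are built by concatenation.
def _get_phonon_usage_sketch():
    Get_Phonon("./Si_phonon/", Temp=[300, 400, 500, 600], ifscale=False)
    # writes Kappa.xlsx with the total lattice thermal conductivity and the
    # branch-resolved N / U / isotope relaxation times at each temperature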
def t_Umklapp(grun,velo,Debye,mass,T): #relaxation time of umklapp process
return (grun**2 * Boltzm**2 * T**3)/(mass * velo**2 * Debye * Planck) * np.exp(-Debye/(3*T))
def t_Normal(grun,velo,mass,vol,T): #relaxation time of normal process
return (grun**2 * (Boltzm*1e23)**5 * (T*1e-2)**5 * (vol*1e30))/((mass*1e27) * (velo*1e-3)**5 * (Planck*1e34)**4) * 1e13
def t_Isotope(velo,vol,abund,T): #relaxation time of isotope scattering
return ((vol*1e30) * (Boltzm*1e23)**4 * abund * (T*1e-2)**4)/(4 * np.pi * (Planck*1e34)**4 * (velo*1e-3)**3) * 1e13
def constC(velo):
return Boltzm**4/(2 * np.pi**2 * Planck**3 * velo) #
def get_fun1(x,RT_N,RT_U,RT_ISO):
return 1/(RT_N * x + RT_U * x**2 + RT_ISO * x**4) * x**4 * np.exp(x)/(np.exp(x)-1)**2
def get_fun2(x,RT_N,RT_U,RT_ISO):
return RT_N/(RT_N + RT_U * x + RT_ISO * x**3) * x**4 * np.exp(x)/(np.exp(x)-1)**2
def get_fun3(x,RT_N,RT_U,RT_ISO):
return RT_N * (RT_U + RT_ISO * x**2) /(RT_N + RT_U * x + RT_ISO * x**3) * x**6 * np.exp(x)/(np.exp(x)-1)**2
def get_fun4(x,RT_N,RT_U,RT_ISO):
return 1/(RT_N + RT_U + RT_ISO * x**2) * x**2 * np.exp(x)/(np.exp(x)-1)**2
def get_fun5(x,RT_N,RT_U,RT_ISO):
return RT_N/(RT_N + RT_U + RT_ISO * x**2) * x**4 * np.exp(x)/(np.exp(x)-1)**2
def get_fun6(x,RT_N,RT_U,RT_ISO):
return RT_N * (RT_U + RT_ISO * x**2)/(RT_N + RT_U + RT_ISO * x**2) * x**6 * np.exp(x)/(np.exp(x)-1)**2
class Phonon(object):
''' Phonon related properties class. '''
def __init__(self, filepath):
self.struct = pmg.core.Structure.from_file(filepath + 'POSCAR')
self.M_avg = 0.0
for ele in self.struct.symbol_set:
self.M_avg = self.M_avg + pmg.core.Element(ele).atomic_mass * self.struct.composition.get_atomic_fraction(ele)
self.M_avg = atommass * self.M_avg
self.V_avg = self.struct.volume/self.struct.composition.num_atoms * 1e-30
self.prims = self.struct.get_primitive_structure()
self.Vol = self.prims.volume * 1e-30
self.p = self.prims.composition.num_atoms
def Get_Para(self, filepath):
(self.gruneisen, self.velocity, self.DebyeT, self.freq, self.optic_base) = Get_GVD(filepath)
self.velocity = self.velocity * 1e2
self.abund = calc_MFPS(list(self.struct.symbol_set))
self.ADebye = self.DebyeT[2]
self.ODebye = self.DebyeT[3]
print(self.gruneisen)
print(self.velocity)
print(self.DebyeT)
print(self.freq)
def HeatCapacity(self, ADebye, ODebye, T, struct): #function to calculate heat capacity
N = 1 # number of primitive cell
prims = struct.get_primitive_structure()
Vol = prims.volume * 1e-30 # primitive cell volume
p = prims.composition.num_atoms # atom number in primitive cell
fun = lambda x: x**4 * np.exp(x)/(np.exp(x)-1)**2
Cv_aco = 9 * N/Vol * Boltzm * (T/ADebye)**3 * quad(fun,0,ADebye/T)[0]
Cv_opt = (3*p-3) * N/Vol * Boltzm * (ODebye/T)**2 * np.exp(ODebye/T)/(np.exp(ODebye/T)-1)**2
return Cv_aco, Cv_opt
def Output(self, Temp):
Kappa_dict={"Temp": Temp, "Kappa": self.avgkappa, \
"TA_N": self.relaxtime[:,0,0], "TA_U": self.relaxtime[:,0,1], "TA_ISO": self.relaxtime[:,0,2],\
"TA\'_N": self.relaxtime[:,1,0], "TA\'_U": self.relaxtime[:,1,1], "TA\'_ISO": self.relaxtime[:,1,2],\
"LA_N": self.relaxtime[:,2,0], "LA_U": self.relaxtime[:,2,1], "LA_ISO": self.relaxtime[:,2,2],\
"O_N": self.relaxtime[:,3,0], "O_U": self.relaxtime[:,3,1], "O_ISO": self.relaxtime[:,3,2]}
Kappa_FILE = pd.DataFrame(Kappa_dict)
Kappa_FILE.to_excel('Kappa.xlsx', index_label='index', merge_cells=False)
# Kappa_FILE.to_csv('Kappa.csv', index_label='index')
def Get_Kappa(self, filepath, Temp, ifscale=False):
'''
Calculate lattice thermal conductivity at given temperature.
Parameters:
----------
filepath: str
            The directory that contains the POSCAR, band.yaml and gruneisen.yaml files.
Temp: list
The list of temperature.
ifscale: bool
If True, the calculated Kappa will be multiplied with a predefined factor.
The default value is False.
'''
self.kappa = np.zeros((len(Temp), 4))
self.avgkappa = np.zeros(len(Temp))
self.relaxtime = np.zeros((len(Temp), 4, 3))
self.ratio = np.zeros(len(Temp))
self.Get_Para(filepath)
if ifscale:
scale = 0.60371
else:
scale = 1.0
for k in np.arange(len(Temp)):
T = Temp[k]
for branch in np.arange(4):
if branch == 0 or branch == 1: # two transverse acoustic branches
coef_TU = t_Umklapp(self.gruneisen[branch],self.velocity[branch],self.DebyeT[branch],self.M_avg,T)
coef_TN = t_Normal(self.gruneisen[branch], self.velocity[branch], self.M_avg, self.V_avg, T)
coef_TISO = t_Isotope(self.velocity[branch], self.V_avg, self.abund, T)
C_T = constC(self.velocity[branch])
IT_1 = C_T * T**3 * quad(get_fun1, 0.0, self.DebyeT[branch]/T, args=(coef_TN,coef_TU,coef_TISO))[0]
BettaT_1 = quad(get_fun2, 0.0, self.DebyeT[branch]/T, args=(coef_TN,coef_TU,coef_TISO))[0]
BettaT_2 = quad(get_fun3, 0.0, self.DebyeT[branch]/T, args=(coef_TN,coef_TU,coef_TISO))[0]
IT_2 = C_T * T**3 * BettaT_1**2/BettaT_2
self.kappa[k, branch] = 1/3 * (IT_1 + IT_2)
self.relaxtime[k, branch, 0] = 1 / (coef_TN * self.DebyeT[branch]/T)
self.relaxtime[k, branch, 1] = 1 / (coef_TU * (self.DebyeT[branch]/T)**2)
self.relaxtime[k, branch, 2] = 1 / (coef_TISO * (self.DebyeT[branch]/T)**4)
elif branch == 2: # one longitudinal acoustic branch
coef_LU = t_Umklapp(self.gruneisen[branch],self.velocity[branch],self.DebyeT[branch],self.M_avg,T)
coef_LN = t_Normal(self.gruneisen[branch],self.velocity[branch],self.M_avg,self.V_avg,T)
coef_LISO = t_Isotope(self.velocity[branch],self.V_avg,self.abund,T)
C_L = constC(self.velocity[branch])
IL_1 = C_L * T**3 * quad(get_fun4, 0.0, self.DebyeT[branch]/T, args=(coef_LN,coef_LU,coef_LISO))[0]
BettaL_1 = quad(get_fun5, 0.0, self.DebyeT[branch]/T, args=(coef_LN,coef_LU,coef_LISO))[0]
BettaL_2 = quad(get_fun6, 0.0, self.DebyeT[branch]/T, args=(coef_LN,coef_LU,coef_LISO))[0]
IL_2 = C_L * T**3 * BettaL_1**2/BettaL_2
self.kappa[k, branch] = 1/3 * (IL_1 + IL_2)
self.relaxtime[k, branch, 0] = 1 / (coef_LN * (self.DebyeT[branch]/T)**2)
self.relaxtime[k, branch, 1] = 1 / (coef_LU * (self.DebyeT[branch]/T)**2)
self.relaxtime[k, branch, 2] = 1 / (coef_LISO * (self.DebyeT[branch]/T)**4)
else: # optical branches
coef_OU = t_Umklapp(self.gruneisen[branch],self.velocity[branch],self.DebyeT[branch],self.M_avg,T)
coef_ON = t_Normal(self.gruneisen[branch],self.velocity[branch],self.M_avg,self.V_avg,T)
coef_OISO = t_Isotope(self.velocity[branch],self.V_avg,self.abund,T)
Normal_time = 1.0 / (coef_ON * (self.DebyeT[branch]/T)**2)
Resist_time = 1.0 / (coef_OU * (self.DebyeT[branch]/T)**2 + coef_OISO * (self.DebyeT[branch]/T)**4)
Total_time = 1.0 / (1.0/Normal_time + 1.0/Resist_time)
IO_1 = (3 * self.p -3) * 1/self.Vol * Boltzm * (self.ODebye/T)**2 * np.exp(self.ODebye/T)/(np.exp(self.ODebye/T)-1)**2 * self.velocity[branch]**2 * Total_time
IO_2 = (3 * self.p -3) * 1/self.Vol * Boltzm * (self.ODebye/T)**2 * np.exp(self.ODebye/T)/(np.exp(self.ODebye/T)-1)**2 * self.velocity[branch]**2 * Total_time/Normal_time * Resist_time
self.kappa[k, branch] = 1/3 * (IO_1 + IO_2)
self.relaxtime[k, branch, 0] = 1 / (coef_ON * (self.DebyeT[branch]/T)**2)
self.relaxtime[k, branch, 1] = 1 / (coef_OU * (self.DebyeT[branch]/T)**2)
self.relaxtime[k, branch, 2] = 1 / (coef_OISO * (self.DebyeT[branch]/T)**4)
self.avgkappa[k] = scale * np.sum(self.kappa[k, :]) | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/phonon.py | phonon.py |
import os
import numpy as np
from numpy.linalg import inv
import scipy.constants
import pymatgen as pmg
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
from pymatgen.io.vasp.inputs import Kpoints
planck = scipy.constants.h
Boltzm = scipy.constants.Boltzmann
MassFluct = {'H':1.1460e-4, 'He':8.3232e-8, 'Li':14.58e-4, 'Be':0.0, 'B':13.54e-4, 'C':7.38695e-05, 'N':1.8577e-05, 'O':3.3590e-05, 'F':0.0, 'Ne':8.2792e-4,
'Na':0.0, 'Mg':7.3989e-4, 'Al':0.0, 'Si':2.01222e-4, 'P':0.0, 'S':1.6808e-4, 'Cl':5.8237e-4, 'Ar':3.50987e-05,
'K':1.64003e-4, 'Ca':2.9756e-4, 'Sc':0.0, 'Ti':2.8645e-4, 'V':9.5492e-07, 'Cr':1.3287e-4, 'Mn':0.0, 'Fe':8.2444e-05, 'Co':0.0, 'Ni':4.3071e-4, 'Cu':2.10858e-4, 'Zn':5.9594e-4, 'Ga':1.9713e-4, 'Ge':5.87597e-4, 'As':0.0, 'Se':4.6268e-4, 'Br':1.56275e-4, 'Kr':2.4849e-4,
'Rb':1.0969e-4, 'Sr':6.0994e-05, 'Y':0.0, 'Zr':3.42626e-4, 'Nb':0.0, 'Mo':5.9793e-4, 'Tc':0.0, 'Ru':4.0663e-4, 'Rh':0.0, 'Pd':3.0945e-4, 'Ag':8.5796e-05, 'Cd':2.7161e-4, 'In':1.2456e-05, 'Sn':3.34085e-4, 'Sb':6.6075e-05, 'Te':2.8395e-4, 'I':0, 'Xe':2.6779e-4,
'Cs':0.0, 'Ba':6.2368e-05, 'La':4.7603e-08, 'Ce':2.2495e-05, 'Pr':0.0, 'Nd':2.3159e-4, 'Pm':0.0, 'Sm':3.3472e-4, 'Eu':4.32889e-05, 'Gd':1.27677e-4, 'Tb':0.0, 'Dy':5.20756e-05, 'Ho':0.0, 'Er':7.2459e-05, 'Tm':0.0, 'Yb':8.5449e-05, 'Lu':8.2759e-07, 'Hf':5.2536e-05,
'Ta':3.80667e-09, 'W':6.9669e-05, 'Re':2.7084e-05,'Os':7.4520e-05, 'Ir':2.5378e-05, 'Pt':3.39199e-05, 'Au':0.0, 'Hg':6.5260e-05, 'Tl':1.99668e-05, 'Pb':1.94476e-05, 'Bi':0.0}
def Generate_kpoints(struct, kppa):
'''
    Generate a KPOINTS file with the desired grid resolution.
Parameters:
----------
struct: pmg.core.structure object
kppa: float
The grid resolution in the reciprocal space, the unit is A-1.
'''
comment = "Kpoints with grid resolution = %.3f / A-1" % (kppa)
recip_lattice = np.array(struct.lattice.reciprocal_lattice.abc)/(2*np.pi)
num_div = [int(round(l / kppa)) for l in recip_lattice]
# ensure that numDiv[i] > 0
num_div = [i if i > 0 else 1 for i in num_div]
    # VASP documentation recommends using even grids for n <= 8 and odd
    # grids for n > 8.
num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]
style = Kpoints.supported_modes.Gamma
num_kpts = 0
return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])
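# Illustrative usage sketch (added for this edit, not part of the original AICON source):
# assuming a POSCAR file exists in the working directory, a Gamma-centered KPOINTS file
# with a grid resolution of roughly 0.03 A^-1 could be produced as shown below; the file
# name and the resolution value are placeholder assumptions.
def _example_generate_kpoints(poscar="POSCAR", kppa=0.03):
    struct = pmg.core.Structure.from_file(poscar)   # read the input structure
    kpts = Generate_kpoints(struct, kppa)           # build the Kpoints object
    kpts.write_file("KPOINTS")                      # write it next to the POSCAR
    return kpts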
def get_highsympath(filename):
''' Get the high symmetry path of phonon spectrum. '''
struct = pmg.core.Structure.from_file(filename)
finder = SpacegroupAnalyzer(struct)
prims = finder.get_primitive_standard_structure()
HKpath = HighSymmKpath(struct)
Keys = list()
Coords = list()
for key in HKpath.kpath['kpoints']:
Keys.append(key)
Coords.append(HKpath.kpath['kpoints'][key])
count = 0
Keylist = list()
Coordslist = list()
for i in np.arange(len(Keys) - 1):
        if (count-1)%3 == 0:  # count-1 is divisible by 3
Keylist.append(Keys[0])
Coordslist.append(Coords[0])
count+=1
Keylist.append(Keys[i+1])
Coordslist.append(Coords[i+1])
count+=1
if (count-1)%3 == 0:
Keylist.append(Keys[0])
Coordslist.append(Coords[0])
print('Please set \"BAND\" parameter of phonopy as this:%s' % os.linesep)
for coord in Coordslist:
print('%.4f %.4f %.4f ' % (coord[0], coord[1], coord[2]), end='')
print('%s' % os.linesep)
transmat = np.eye(3)
if prims.num_sites != struct.num_sites:
S_T = np.transpose(struct.lattice.matrix)
P_T = np.transpose(prims.lattice.matrix)
transmat = inv(S_T) @ P_T
        print('Your structure appears to have a primitive cell. Please set the \"PRIMITIVE_AXIS\" parameter of phonopy as follows:%s' % os.linesep)
for coord in transmat:
print('%.8f %.8f %.8f ' % (coord[0], coord[1], coord[2]), end='')
print('%s' % os.linesep)
return Keylist, Coordslist, prims, transmat
def pbc_diff(fcoords1, fcoords2):
fdist = np.subtract(fcoords1, fcoords2)
return fdist - np.round(fdist)
def get_sym_eq_kpoints(struct, kpoint, cartesian=False, tol=1e-2):
'''Get the symmetry equivalent kpoints list'''
if not struct:
return None
sg = SpacegroupAnalyzer(struct)
symmops = sg.get_point_group_operations(cartesian=cartesian)
points = np.dot(kpoint, [m.rotation_matrix for m in symmops])
rm_list = []
# identify and remove duplicates from the list of equivalent k-points:
for i in range(len(points) - 1):
for j in range(i + 1, len(points)):
if np.allclose(pbc_diff(points[i], points[j]), [0, 0, 0], tol):
rm_list.append(i)
break
return np.delete(points, rm_list, axis=0)
def get_highsymweight(filename):
''' Get the multiplicity of the high symmetry path. '''
struct = pmg.core.Structure.from_file(filename)
HKpath = HighSymmKpath(struct)
Keys = list()
Coords = list()
for key in HKpath.kpath['kpoints']:
Keys.append(key)
Coords.append(HKpath.kpath['kpoints'][key])
Kweight = list()
for i in np.arange(len(Keys)):
if Keys[i] != '\Gamma':
Kweight.append(len(get_sym_eq_kpoints(struct, Coords[i]*0.5)))
return Keys, Coords, Kweight
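# Illustrative usage sketch (added for this edit, not part of the original AICON source):
# the weights returned here are the multiplicities of the high-symmetry k-points, which
# calc_MGV and calc_MGP below use to average branch quantities; "POSCAR" is a placeholder
# file-name assumption.
def _example_highsym_weights(poscar="POSCAR"):
    keys, coords, kweight = get_highsymweight(poscar)
    labels = [k for k in keys if k != '\\Gamma']     # Gamma itself carries no weight entry
    for label, w in zip(labels, kweight):
        print('%s  multiplicity = %d' % (label, w))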
def extract_GV(filepath):
'''Extract frequency and group velocity information. '''
fp1 = open(filepath + 'band.yaml','r')
keystr1 = "q-position"
keystr2 = "distance"
keystr3 = "frequency"
keystr4 = "group_velocity"
keystr5 = "nqpoint:"
keystr6 = "natom:"
npoints = 0
nbands = 0
countpoints = -1
countbands = 0
Gammaflag = 0
Gamma = list()
for eachline in fp1:
eachline = eachline.strip()
temp = eachline.split()
if len(temp) > 0:
if keystr5 == temp[0]:
npoints = int(temp[-1])
elif keystr6 in eachline:
nbands = int(temp[-1]) * 3
GroupVec = np.zeros((npoints, nbands+1))
Frequency = np.zeros((npoints, nbands+1))
elif keystr1 in eachline:
countpoints = countpoints + 1
countbands = 0
                postemp = np.array([float(temp[i][:-1]) for i in np.arange(3,6)])
# print('%f %f %f' % (postemp[0],postemp[1],postemp[2]))
if postemp[0] == 0.0 and postemp[1] == 0.0 and postemp[2] == 0.0:
Gammaflag = 1
elif keystr2 in eachline:
                #write the distance value to the first column of the array
                GroupVec[countpoints,countbands] = float(temp[-1])
                Frequency[countpoints,countbands] = float(temp[-1])
                countbands = countbands + 1
                if Gammaflag == 1:
                    Gammaflag = 0
                    if float(temp[-1]) not in Gamma:
                        Gamma.append(float(temp[-1]))
elif keystr3 in eachline:
                Frequency[countpoints,countbands] = float(temp[-1])
elif keystr4 in eachline:
                #write the velocity magnitude to the remaining columns of each row of the array
                vectemp = np.array([float(temp[i][:-1]) for i in np.arange(2,5)])
vectemp2 = vectemp**2
GroupVec[countpoints,countbands] = np.sqrt(vectemp2.sum())
countbands = countbands + 1
else:
continue
else:
continue
fp1.close()
Gamma = np.array(Gamma)
return GroupVec,Frequency,Gamma
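# Note (added for clarity, not part of the original AICON source): extract_GV expects a
# phonopy "band.yaml" file inside `filepath`; since the file name is concatenated directly,
# the path should end with a trailing separator, e.g. "./" or "phonon/".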
def extract_GrunP(filepath, nbands=9, npoints=255):
    '''Extract Gruneisen parameter information. '''
fp1 = open(filepath + 'gruneisen.yaml','r')
keystr1 = "q-position"
keystr2 = "distance"
keystr3 = "gruneisen"
keystr4 = "frequency"
    datatype = np.dtype([('freq', float), ('grun', float)])
Pathpot = np.zeros(npoints)
GruneisenPara = np.zeros((npoints, nbands),dtype=datatype)
countpoints = -1
countbands = 0
for eachline in fp1:
eachline = eachline.strip()
temp = eachline.split()
if len(temp) > 0:
if keystr1 in eachline:
countpoints = countpoints + 1
countbands = 0
elif keystr2 in eachline:
#write distance value to the first column of np.array
                Pathpot[countpoints] = float(temp[-1])
elif keystr3 in eachline:
                #write the gruneisen value to the remaining columns of each row of the array
                gruntemp = float(temp[-1])
            elif keystr4 in eachline:
                freqtemp = float(temp[-1])
GruneisenPara[countpoints,countbands] = np.array((freqtemp,gruntemp), dtype=datatype)
countbands = countbands + 1
else:
continue
else:
continue
fp1.close()
for j in range(npoints):
GruneisenPara[j,:] = np.sort(GruneisenPara[j,:], order='freq')
return Pathpot, GruneisenPara
def calc_MGV(filepath,weight):
'''Calculate branch velocity and frequency. '''
(GroupVec,Frequency,Gamma) = extract_GV(filepath)
Gamma_index = np.zeros((len(Gamma),2))
Bandnum = GroupVec.shape[1] - 1
    Minindex = 0
    Maxindex = int(GroupVec.shape[0] - 1)
#search groupvec to get Gamma positions
Pathnum = 0
for i in np.arange(len(Gamma)):
Gamma_index[i] = np.array([x_index for x_index,x_value in enumerate(GroupVec[:,0]) if x_value==Gamma[i]])
        if Gamma_index[i,0] == Gamma_index[i,1]:
            Pathnum = Pathnum + 1
        else:
            Pathnum = Pathnum + 2
#the following is for calculating average group velocity of different branch
    modebranch_vel = np.zeros((Bandnum,Pathnum,5))  # the first dimension size equals natom*3
branch_vel = np.zeros(Bandnum)
for branch_idx in np.arange(Bandnum):
for j in np.arange(len(Gamma)): #
for k in np.arange(2):
if k == 0:
if Gamma_index[j,k] > Minindex:
                        modebranch_vel[branch_idx,j*2 + k] = [GroupVec[int(index),branch_idx+1] for index in np.arange(Gamma_index[j,k]-6,Gamma_index[j,k]-1)]
                    else:  # this branch should never be executed; the Gamma point should never be the first point
                        modebranch_vel[branch_idx,j*2 + k] = [GroupVec[int(index),branch_idx+1] for index in np.arange(Gamma_index[j,k]+2,Gamma_index[j,k]+7)]
break
if k == 1:
if Gamma_index[j,k] < Maxindex:
                        modebranch_vel[branch_idx,j*2 + k] = [GroupVec[int(index),branch_idx+1] for index in np.arange(Gamma_index[j,k]+2,Gamma_index[j,k]+7)]
else:
break
for branch_idx in np.arange(Bandnum):
for j in np.arange(Pathnum):
branch_vel[branch_idx] = branch_vel[branch_idx] + weight[j] * np.average(modebranch_vel[branch_idx,j,:])
branch_vel[branch_idx] = branch_vel[branch_idx] / np.sum(weight)
#the following is for calculating average frequency of different branch
modebranch_freq = np.zeros((Bandnum,Pathnum,51))
branch_freq = np.zeros(Bandnum)
for branch_idx in np.arange(Bandnum):
for j in np.arange(len(Gamma)):
for k in np.arange(2):
if k == 0:
if Gamma_index[j,k] > Minindex:
                        modebranch_freq[branch_idx,j*2 + k] = [Frequency[int(index),branch_idx+1] for index in np.arange(Gamma_index[j,k]-50,Gamma_index[j,k]+1)]
                    else:
                        modebranch_freq[branch_idx,j*2 + k] = [Frequency[int(index),branch_idx+1] for index in np.arange(Gamma_index[j,k],Gamma_index[j,k]+51)]
break
if k == 1:
if Gamma_index[j,k] < Maxindex:
                        modebranch_freq[branch_idx,j*2 + k] = [Frequency[int(index),branch_idx+1] for index in np.arange(Gamma_index[j,k],Gamma_index[j,k]+51)]
else:
break
for branch_idx in np.arange(Bandnum):
for j in np.arange(Pathnum):
branch_freq[branch_idx] = branch_freq[branch_idx] + weight[j] * np.average(modebranch_freq[branch_idx,j,:])
branch_freq[branch_idx] = branch_freq[branch_idx] / np.sum(weight)
#the following is for calculating debye temperature for different branch
branch_DebyeT = np.zeros(Bandnum)
for branch_idx in np.arange(Bandnum):
branch_DebyeT[branch_idx] = planck * np.max(Frequency[:,branch_idx+1]) * 1e12/Boltzm
Optic_base = planck * np.min(Frequency[:,4:Bandnum]) * 1e12/Boltzm
return branch_vel,branch_freq,branch_DebyeT,Optic_base
def calc_MGP(filepath,weight): #Gamma:the position of Gamma, weight:multiplicity of Gamma points
'''Calculate branch gruneisen parameters.'''
(GroupVec,Freq,Gamma) = extract_GV(filepath)
Gamma_index = np.zeros((len(Gamma),2))
Bandnum = GroupVec.shape[1] - 1
    Minindex = 0
    Maxindex = int(GroupVec.shape[0] - 1)
Path, Gruneisen = extract_GrunP(filepath,Bandnum,GroupVec.shape[0])
Pathnum = 0
for i in np.arange(len(Gamma)):
Gamma_index[i] = np.array([x_index for x_index,x_value in enumerate(GroupVec[:,0]) if x_value==Gamma[i]])
        if Gamma_index[i,0] == Gamma_index[i,1]:
            Pathnum = Pathnum + 1
        else:
            Pathnum = Pathnum + 2
modebranch_grun = np.zeros((Bandnum,Pathnum,50)) # value for each path in each branch, exclude Gamma point
branch_grun = np.zeros(Bandnum) #average value for different branch
for branch_idx in np.arange(Bandnum):
for j in np.arange(len(Gamma)):
for k in np.arange(2):
if k == 0:
if Gamma_index[j,k] > Minindex:
                        modebranch_grun[branch_idx,j*2 + k] = [Gruneisen[int(index),branch_idx]['grun'] for index in np.arange(Gamma_index[j,k]-50,Gamma_index[j,k])]
                    else:
                        modebranch_grun[branch_idx,j*2 + k] = [Gruneisen[int(index),branch_idx]['grun'] for index in np.arange(Gamma_index[j,k]+1,Gamma_index[j,k]+51)]
break
if k == 1:
if Gamma_index[j,k] < Maxindex:
                        modebranch_grun[branch_idx,j*2 + k] = [Gruneisen[int(index),branch_idx]['grun'] for index in np.arange(Gamma_index[j,k]+1,Gamma_index[j,k]+51)]
else:
break
for branch_idx in np.arange(Bandnum):
for j in np.arange(len(weight)):
branch_grun[branch_idx] = branch_grun[branch_idx] + weight[j] * np.power(np.average(np.abs(modebranch_grun[branch_idx,j,:])),2)
branch_grun[branch_idx] = np.sqrt(branch_grun[branch_idx] / np.sum(weight))
return branch_grun
def Get_GVD(filepath):
'''
    Obtain the Gruneisen parameter, group velocity and Debye temperature needed for the kappa calculation.
    Each returned array has four entries: the three acoustic branches and one "representative" optic branch.
'''
gruneisen = np.zeros(4)
velocity = np.zeros(4)
DebyeT = np.zeros(4)
freq = np.zeros(4)
(no1,no2,weight) = get_highsymweight(filepath + "POSCAR")
(branchvel,branchfreq,branchDebyeT,Optic_base) = calc_MGV(filepath,weight)
branchgrun = calc_MGP(filepath,weight)
gruneisen[0:3] = branchgrun[0:3]
velocity[0:3] = branchvel[0:3]
DebyeT[0:3] = branchDebyeT[0:3]
freq[0:3] = branchfreq[0:3]
weightsum = np.sum(branchfreq[3:])
#The following is for optic branch
for i in np.arange(3,len(branchfreq)):
gruneisen[3] = gruneisen[3] + branchfreq[i] * branchgrun[i]
velocity[3] = velocity[3] + branchfreq[i] * branchvel[i]
DebyeT[3] = DebyeT[3] + branchfreq[i] * branchDebyeT[i]
gruneisen[3] = gruneisen[3]/weightsum
velocity[3] = velocity[3]/weightsum
DebyeT[3] = DebyeT[3]/weightsum
freq[3] = DebyeT[3] * Boltzm/(1e12 * planck)
return gruneisen, velocity, DebyeT, freq, Optic_base
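# Illustrative usage sketch (added for this edit, not part of the original AICON source):
# Get_GVD expects a directory containing the POSCAR, band.yaml and gruneisen.yaml files
# produced by phonopy; "phonon/" below is a placeholder path assumption.
def _example_get_gvd(filepath="phonon/"):
    gruneisen, velocity, debye_t, freq, optic_base = Get_GVD(filepath)
    # entries 0-2 are the three acoustic branches, entry 3 the averaged optic branch
    print("Gruneisen parameters:", gruneisen)
    print("Group velocities:", velocity)
    print("Debye temperatures (K):", debye_t)
    return gruneisen, velocity, debye_t, freq, optic_base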
def calc_MFPS(Elem_tabl):
'''Calculate mass fluctuation phonon scattering parameter. '''
tab_len = len(Elem_tabl)
Mass = [pmg.core.Element[Elem_tabl[i]].atomic_mass for i in np.arange(tab_len)]
MassSum = np.sum(Mass)
MFPS = 0.0
for i in np.arange(tab_len):
MFPS = MFPS + (Mass[i]/MassSum)**2 * MassFluct[Elem_tabl[i]]
MFPS = tab_len * MFPS
return MFPS
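# Illustrative usage note (added for this edit, not part of the original AICON source):
# the mass-fluctuation scattering parameter of a hypothetical binary compound could be
# evaluated from the element symbols of one formula unit, e.g. calc_MFPS(['Pb', 'Te']);
# the element list here is a placeholder assumption.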
def Write_INPCAR(coord, step_size, bnd_num, prg, lattice):
    '''Write an INPCAR file for the effective-mass calculation routine.'''
fp = open('INPCAR','w')
fp.write('%.6f %.6f %.6f%s' % (coord[0], coord[1], coord[2], os.linesep))
fp.write('%f%s' % (step_size, os.linesep))
fp.write('%d%s' % (bnd_num, os.linesep))
fp.write('%s%s' % (prg, os.linesep))
for vector in lattice:
fp.write('%.9f %.9f %.9f%s' % (vector[0], vector[1], vector[2], os.linesep))
fp.close()
return | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/tools.py | tools.py |
import numpy as np
import scipy.constants
from scipy.integrate import quad
from aicon.myemc import EffectMass
from aicon.deformatpotent import DeformatPotentC
from aicon.dielectric import DielConst
from aicon.elastic import ElasticConst
Planck = scipy.constants.hbar
EPlanck = scipy.constants.physical_constants['Planck constant over 2 pi in eV s'][0]
Boltzm = scipy.constants.Boltzmann
EBoltzm = scipy.constants.physical_constants['Boltzmann constant in eV/K'][0]
m_e = scipy.constants.m_e
EtoJoul = scipy.constants.physical_constants['joule-electron volt relationship'][0]
C_e = scipy.constants.e
epsilon_0 = scipy.constants.epsilon_0
def Upperlimit(x):
if x < 0:
return 100
else:
return x + 100
class RelaxTime(object):
'''
This is the parent class of all relaxation time classes with shared properties.
'''
def __init__(self, flag, degeneracy, bandgap, *pos):
self.value = 0.0
self.flag = flag
self.N = degeneracy
self.Bandgap = bandgap
self.Beta = lambda T: EBoltzm * T / self.Bandgap
self.pos = pos
def __get__(self, obj, typ = None):
return self.value
def __str__(self):
return '%.2f' % self.value
__repr__ = __str__
def Get_effemass(self, filepath):
self.EMC = EffectMass()
if self.flag == 'VBM':
filepath = filepath + 'VBM/'
elif self.flag == 'CBM':
filepath = filepath + 'CBM/'
elif self.flag == 'CSB':
filepath = filepath + 'CSB/'
else:
filepath = filepath + 'VSB/'
inpcar_fh = open(filepath+'INPCAR', 'r')
kpt, stepsize, band, prg, basis = self.EMC.parse_inpcar(inpcar_fh)
self.EMC.cal_effmass(kpt, stepsize, band, prg, basis, filepath+'EIGENVAL')
self.effmass = lambda z, T: np.abs(self.EMC.condeffmass) * (1 + 2 * self.Beta(T) * z)
def Get_deformpot(self, filepath):
self.DPC = DeformatPotentC(self.flag)
path = [filepath + 'equi/', filepath + '0.5per/', filepath + '1.0per/']
self.DPC.Get_DPC(path, *self.pos)
def Get_elastconst(self, filepath):
self.Elastic = ElasticConst()
self.Elastic.Get_AvgLongEConst(filepath)
def Get_dielconst(self, filepath):
self.Diel = DielConst()
self.Diel.Get_DielConst(filepath)
def Set_degeneracy(self, value):
self.N = value
def Set_bandgap(self, value):
self.Bandgap = value
def Get_moment(self):
self.Moment = lambda z, T: 1e-9 * np.sqrt(EtoJoul) * (2 * np.abs(self.EMC.doseffmass) * m_e)**(1/2) / EPlanck * (z * EBoltzm * T * (1 + z * EBoltzm * T / self.Bandgap))**(1/2)
#unit is nm
def Get_fermidistrFun(self, z, x):
if np.exp(z-x) == np.inf:
return 0.0
else:
return 1.0 / (np.exp(z - x) + 1)
def DfermidistrFun(self, z, x):
if np.exp(z-x)**2 == np.inf:
return 0.0
else:
return -np.exp(z - x) / (np.exp(z - x) + 1)**2
def integral(self, x, T, n, m, k):
integrand = lambda z, x, T: (-self.DfermidistrFun(z,x)) * z**(n) * (z + self.Beta(T) * z**2)**(m) * (1 + 2 * self.Beta(T) * z)**(k)
return quad(integrand, 0, Upperlimit(x), args=(x, T))[0]
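    # Note (added for clarity, not part of the original AICON source): `integral` evaluates
    # the generalized Fermi-type integral
    #     int_0^{x+100} (-df/dz) * z**n * (z + beta*z**2)**m * (1 + 2*beta*z)**k dz
    # where f is the Fermi-Dirac distribution, x the reduced Fermi level and
    # beta = kB*T/Eg, as used by the non-parabolic band expressions below.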
def Fermiintegral(self, x, n):
integrand = lambda z, x: self.Get_fermidistrFun(z, x) * z**(n)
return quad(integrand, 0, np.inf, args=(x))[0]
class AcoRelaxTime(RelaxTime):
    '''Subclass of RelaxTime representing the acoustic-phonon relaxation time within the non-parabolic approximation.'''
def Get_DOSfun(self):
self.doseffmass = self.N**(2/3) * self.EMC.doseffmass
self.DOS = lambda z, T: 2**(1/2) * np.abs(self.doseffmass * m_e)**(3/2) / (np.pi**2 * EPlanck**3) * (z * EBoltzm * T)**(1/2) \
* (1 + 2 * z * EBoltzm * T / self.Bandgap) * (1 + z * EBoltzm * T / self.Bandgap)**(1/2) #here I omit the constant EtoJoul^3/2
def Get_relaxtimefun(self, filepath):
self.Get_deformpot(filepath)
self.Get_elastconst(filepath + 'elastic/')
self.Get_effemass(filepath)
self.Beta = lambda T: EBoltzm * T / self.Bandgap
self.Get_DOSfun()
self.Acotime = lambda z, T: 1 / (np.sqrt(EtoJoul) * np.pi * EBoltzm * T * self.DOS(z, T) * self.DPC.value**2 / (EPlanck * self.Elastic.value * 1e9 * self.N) \
* (1 - 8 * self.Beta(T) * (z + self.Beta(T) * z**2) / (3 * (1 + 2 * self.Beta(T) * z)**2)))
def Get_Avgacotime(self, filepath):
self.Get_relaxtimefun(filepath)
self.Get_moment()
# fun1 = lambda z, x, T: 1e14 * self.Acotime(z, T) * (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
fun2 = lambda z, x, T: (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
fun4 = lambda z, x, T: 1e14 * self.Acotime(z, T) * (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3 / (1 + 2 * self.Beta(T) * z)
fun5 = lambda z, x, T: self.effmass(z, T) * (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
self.integfun2 = lambda x, T: quad(fun2, 0, Upperlimit(x), args=(x, T))[0]
self.integfun4 = lambda x, T: quad(fun4, 0, Upperlimit(x), args=(x, T))[0]
self.integfun5 = lambda x, T: quad(fun5, 0, Upperlimit(x), args=(x, T))[0]
def Get_values(self, x, T):
Moment = self.integfun2(x, T)
Avgtime = self.integfun4(x, T) / Moment * 1e-14
Avgeffmass = self.integfun5(x, T) / Moment
return Moment, Avgtime, Avgeffmass
class AcoRelaxTime_Para(RelaxTime):
    '''Subclass of RelaxTime representing the acoustic-phonon relaxation time within the parabolic approximation.'''
def Get_moment(self):
self.Moment = lambda z, T: 1e-9 * np.sqrt(EtoJoul) * (2 * np.abs(self.EMC.doseffmass) * m_e)**(1/2) / EPlanck * (z * EBoltzm * T)**(1/2)
def Get_DOSfun(self):
self.doseffmass = self.N**(2/3) * self.EMC.doseffmass
self.DOS = lambda z, T: 2**(1/2) * np.abs(self.doseffmass * m_e)**(3/2) / (np.pi**2 * EPlanck**3) * (z * EBoltzm * T)**(1/2)
def Get_relaxtimefun(self, filepath):
self.Get_deformpot(filepath)
self.Get_elastconst(filepath + 'elastic/')
self.Get_effemass(filepath)
self.Get_DOSfun()
self.Acotime = lambda z, T: 1 / (np.sqrt(EtoJoul) * np.pi * EBoltzm * T * self.DOS(z, T) * self.DPC.value**2 / (EPlanck * self.Elastic.value * 1e9 * self.N))
def Get_Avgacotime(self, filepath):
self.Get_relaxtimefun(filepath)
self.Get_moment()
fun1 = lambda z, x, T: 1e14 * self.Acotime(z, T) * (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
fun2 = lambda z, x, T: (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
self.integfun1 = lambda x, T: quad(fun1, 0, Upperlimit(x), args=(x, T))[0]
self.integfun2 = lambda x, T: quad(fun2, 0, Upperlimit(x), args=(x, T))[0]
def Get_values(self, x, T):
Moment = self.integfun2(x, T)
Avgtime = self.integfun1(x, T) / Moment * 1e-14
Avgeffmass = np.abs(self.EMC.condeffmass)
return Moment, Avgtime, Avgeffmass
class OptRelaxTime(RelaxTime):
    '''Subclass of RelaxTime representing the polar optical phonon relaxation time within the non-parabolic approximation.'''
def Get_DOSfun(self):
self.doseffmass = self.N**(2/3) * self.EMC.doseffmass
self.DOS = lambda z, T: 2**(1/2) * np.abs(self.doseffmass * m_e)**(3/2) / (np.pi**2 * EPlanck**3) * (z * EBoltzm * T)**(1/2) \
* (1 + 2 * z * self.Beta(T)) * (1 + z * self.Beta(T))**(1/2)
def Get_scrad(self):
self.Get_DOSfun()
self.Scrad = lambda z, T: EtoJoul**(5/2) * 4 * np.pi * C_e**2 * self.DOS(z, T) / self.Diel.electron
def Get_delta(self, filepath):
self.Get_effemass(filepath)
self.Get_dielconst(filepath + "dielect/")
self.Get_scrad()
self.Get_moment()
self.Delta = lambda z, T: (2 * self.Moment(z, T) * 1e9)**(-2) * self.Scrad(z, T)
def Get_relaxtimefun(self, filepath):
self.Get_delta(filepath)
self.Opttime = lambda z, T: 1/(EtoJoul**(3/2) * 2**(1/2) * EBoltzm * T * C_e**2 * np.abs(self.EMC.doseffmass * m_e)**(1/2) * (self.Diel.electron**(-1) - self.Diel.static**(-1)) / (EPlanck**2 * (z * EBoltzm * T)**(1/2)) \
* (1 + 2 * self.Beta(T) * z) / (1 + self.Beta(T) * z)**(1/2) * ((1 - self.Delta(z, T) * np.log(1 + 1 / self.Delta(z, T))) - 2 * self.Beta(T) * (z + self.Beta(T) * z**2) \
/ (1 + 2 * self.Beta(T) * z)**2 * (1 - 2 * self.Delta(z, T) + 2 * self.Delta(z, T)**2 * np.log(1 + 1 / self.Delta(z, T)))))
def Get_Avgopttime(self, filepath):
self.Get_relaxtimefun(filepath)
fun1 = lambda z, x, T: 1e14 * self.Opttime(z, T) * (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
# fun2 = lambda z, x, T: (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
self.integfun1 = lambda x, T: quad(fun1, 0, Upperlimit(x), args=(x, T))[0]
def Get_values(self, x, T, Moment):
Avgtime = self.integfun1(x, T) / Moment * 1e-14
return Avgtime
class OptRelaxTime_Para(RelaxTime):
    '''Subclass of RelaxTime representing the polar optical phonon relaxation time within the parabolic approximation.'''
def Get_moment(self):
self.Moment = lambda z, T: 1e-9 * np.sqrt(EtoJoul) * (2 * np.abs(self.EMC.doseffmass) * m_e)**(1/2) / EPlanck * (z * EBoltzm * T)**(1/2)
def Get_DOSfun(self):
self.doseffmass = self.N**(2/3) * self.EMC.doseffmass
self.DOS = lambda z, T: 2**(1/2) * np.abs(self.doseffmass * m_e)**(3/2) / (np.pi**2 * EPlanck**3) * (z * EBoltzm * T)**(1/2)
def Get_scrad(self, filepath):
self.Get_DOSfun()
self.Scrad = lambda z, T: EtoJoul**(5/2) * 4 * np.pi * C_e**2 * self.DOS(z, T) / self.Diel.electron
def Get_delta(self, filepath):
self.Get_effemass(filepath)
self.Get_dielconst(filepath + "dielect/")
self.Get_scrad(filepath)
self.Get_moment()
self.Delta = lambda z, T: (2 * self.Moment(z, T) * 1e9)**(-2) * self.Scrad(z, T)
def Get_relaxtimefun(self, filepath):
self.Get_delta(filepath)
self.Opttime = lambda z, T: 1/(EtoJoul**(3/2) * 2**(1/2) * EBoltzm * T * C_e**2 * np.abs(self.EMC.doseffmass * m_e)**(1/2) * (self.Diel.electron**(-1) - self.Diel.static**(-1)) / (EPlanck**2 * (z * EBoltzm * T)**(1/2)) \
* (1 - self.Delta(z, T) * np.log(1 + 1 / self.Delta(z, T))))
def Get_Avgopttime(self, filepath):
self.Get_relaxtimefun(filepath)
fun1 = lambda z, x, T: 1e14 * self.Opttime(z, T) * (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
# fun2 = lambda z, x, T: (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
self.integfun1 = lambda x, T: quad(fun1, 0, Upperlimit(x), args=(x, T))[0]
def Get_values(self, x, T, Moment):
Avgtime = self.integfun1(x, T) / Moment * 1e-14
return Avgtime
class ImpurityRelaxTime(RelaxTime):
    '''Subclass of RelaxTime representing the ionized-impurity relaxation time within the non-parabolic approximation.'''
def Get_DOSfun(self):
self.doseffmass = self.N**(2/3) * self.EMC.doseffmass
self.DOS = lambda z, T: 2**(1/2) * np.abs(self.doseffmass * m_e)**(3/2) / (np.pi**2 * EPlanck**3) * (z * EBoltzm * T)**(1/2) \
* (1 + 2 * z * self.Beta(T)) * (1 + z * self.Beta(T))**(1/2) #here I omit the constant EtoJoul^3/2
def Get_AvgDOS(self, filepath):
self.Get_effemass(filepath)
self.Get_DOSfun()
self.Get_moment()
fun1 = lambda z, x, T: self.DOS(z, T) * (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
self.integfun1 = lambda x, T: quad(fun1, 0, Upperlimit(x), args=(x, T))[0]
def Get_scrad(self, filepath):
self.Get_AvgDOS(filepath)
self.Get_dielconst(filepath + "dielect/")
self.Scrad_2 = lambda x, T: EtoJoul**(5/2) * 4 * np.pi * C_e**2 * self.AvgDOS(x, T) / self.Diel.static
def Get_kF(self):
self.k_F = lambda x, T: (3 * np.pi**2 * self.Density(x, T) / self.N)**(1/3)
def Get_delta(self, filepath):
self.Get_scrad(filepath)
self.Get_kF()
self.Delta_2 = lambda x, T: (2 * self.k_F(x, T))**(-2) * self.Scrad_2(x, T)
def Get_Imptime(self, filepath):
self.Get_AvgDOS(filepath)
self.Get_dielconst(filepath + "dielect/")
def Get_values(self, x, T, Moment, Density):
AvgDOS = self.integfun1(x, T) / Moment
Scrad_2 = EtoJoul**(5/2) * 4 * np.pi * C_e**2 * AvgDOS / self.Diel.static
k_F = (3 * np.pi**2 * Density / self.N)**(1/3)
Delta_2 = (2 * k_F)**(-2) * Scrad_2
Avgtime = 1 / (EtoJoul**3 * 2 * C_e**4 * self.N * np.abs(self.EMC.doseffmass) * m_e * (1 + 2 * x * self.Beta(T)) \
* (np.log(1 + 1/Delta_2) - (1 + Delta_2)**(-1)) \
/ (3 * np.pi * self.Diel.static**2 * EPlanck**3))
return Avgtime
class ImpurityRelaxTime_Para(RelaxTime):
    '''Subclass of RelaxTime representing the ionized-impurity relaxation time within the parabolic approximation.'''
def Get_moment(self):
self.Moment = lambda z, T: 1e-9 * np.sqrt(EtoJoul) * (2 * np.abs(self.EMC.doseffmass) * m_e)**(1/2) / EPlanck * (z * EBoltzm * T)**(1/2) #unit nm
def Get_DOSfun(self):
self.doseffmass = self.N**(2/3) * self.EMC.doseffmass
self.DOS = lambda z, T: 2**(1/2) * np.abs(self.doseffmass * m_e)**(3/2) / (np.pi**2 * EPlanck**3) * (z * EBoltzm * T)**(1/2) #here I omit the constant EtoJoul^3/2
def Get_AvgDOS(self, filepath):
self.Get_effemass(filepath)
self.Get_DOSfun()
self.Get_moment()
fun1 = lambda z, x, T: self.DOS(z, T) * (-self.DfermidistrFun(z,x)) * self.Moment(z, T)**3
self.integfun1 = lambda x, T: quad(fun1, 0, Upperlimit(x), args=(x, T))[0]
def Get_scrad(self, filepath):
self.Get_AvgDOS(filepath)
self.Get_dielconst(filepath + "dielect/")
self.Scrad_2 = lambda x, T: EtoJoul**(5/2) * 4 * np.pi * C_e**2 * self.AvgDOS(x, T) / self.Diel.static
def Get_kF(self):
self.k_F = lambda x, T: (3 * np.pi**2 * self.Density(x, T) / self.N)**(1/3)
def Get_delta(self, filepath):
self.Get_scrad(filepath)
self.Get_kF()
self.Delta_2 = lambda x, T: (2 * self.k_F(x, T))**(-2) * self.Scrad_2(x, T)
def Get_Imptime(self, filepath):
self.Get_AvgDOS(filepath)
self.Get_dielconst(filepath + "dielect/")
def Get_values(self, x, T, Moment, Density):
AvgDOS = self.integfun1(x, T) / Moment
Scrad_2 = EtoJoul**(5/2) * 4 * np.pi * C_e**2 * AvgDOS / self.Diel.static
k_F = (3 * np.pi**2 * Density / self.N)**(1/3)
Delta_2 = (2 * k_F)**(-2) * Scrad_2
Avgtime = 1 / (EtoJoul**3 * 2 * C_e**4 * self.N * np.abs(self.EMC.doseffmass) * m_e * (1 + 2 * x * self.Beta(T)) \
* (np.log(1 + 1/Delta_2) - (1 + Delta_2)**(-1)) \
/ (3 * np.pi * self.Diel.static**2 * EPlanck**3))
return Avgtime
class TotalRelaxTime(RelaxTime):
    '''Subclass of RelaxTime representing the total relaxation time.'''
def Get_Totaltime(self, filepath, ACO = True, ACO_P = False, OPT = False, OPT_P = False, IMP = False, IMP_P = False):
if ACO == True:
self.ACO = AcoRelaxTime(self.flag, self.N, self.Bandgap, *self.pos)
self.ACO.Get_Avgacotime(filepath)
if ACO_P == True:
self.ACO = AcoRelaxTime_Para(self.flag, self.N, self.Bandgap, *self.pos)
self.ACO.Get_Avgacotime(filepath)
if OPT == True:
self.OPT = OptRelaxTime(self.flag, self.N, self.Bandgap)
self.OPT.Get_Avgopttime(filepath)
if self.OPT.Diel.ion == 0:
self.OPT.integfun1 = lambda x, T: np.inf
else:
pass
if OPT_P == True:
self.OPT = OptRelaxTime_Para(self.flag, self.N, self.Bandgap)
self.OPT.Get_Avgopttime(filepath)
if self.OPT.Diel.ion == 0:
self.OPT.integfun1 = lambda x, T: np.inf
else:
pass
if IMP == True:
self.IMP = ImpurityRelaxTime(self.flag, self.N, self.Bandgap)
self.IMP.Get_Imptime(filepath)
if IMP_P == True:
self.IMP = ImpurityRelaxTime_Para(self.flag, self.N, self.Bandgap)
self.IMP.Get_Imptime(filepath)
self.doseffmass = self.ACO.doseffmass | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/relaxtime.py | relaxtime.py |
import numpy as np
import sys
import time
class EffectMass(object):
    '''Calculate the band effective mass at a specific k-point and store the results.'''
EMC_VERSION = '1.51py'
STENCIL = 5 #3 or 5
Bohr = 0.52917721092
def __init__(self):
###################################################################################################
#
# STENCILS for finite difference
#
# three-point stencil
self.st3 = []
self.st3.append([0.0, 0.0, 0.0]); # 0
self.st3.append([-1.0, 0.0, 0.0]); self.st3.append([1.0, 0.0, 0.0]); # dx 1-2
self.st3.append([0.0, -1.0, 0.0]); self.st3.append([0.0, 1.0, 0.0]) # dy 3-4
self.st3.append([0.0, 0.0, -1.0]); self.st3.append([0.0, 0.0, 1.0]) # dz 5-6
self.st3.append([-1.0, -1.0, 0.0]); self.st3.append([1.0, 1.0, 0.0]); self.st3.append([1.0, -1.0, 0.0]); self.st3.append([-1.0, 1.0, 0.0]); # dxdy 7-10
self.st3.append([-1.0, 0.0, -1.0]); self.st3.append([1.0, 0.0, 1.0]); self.st3.append([1.0, 0.0, -1.0]); self.st3.append([-1.0, 0.0, 1.0]); # dxdz 11-14
self.st3.append([0.0, -1.0, -1.0]); self.st3.append([0.0, 1.0, 1.0]); self.st3.append([0.0, 1.0, -1.0]); self.st3.append([0.0, -1.0, 1.0]); # dydz 15-18
#
# five-point stencil
self.st5 = []
self.st5.append([0.0, 0.0, 0.0])
#
a = [-2,-1,1,2]
for i in range(len(a)): #dx
self.st5.append([float(a[i]), 0., 0.])
#
for i in range(len(a)): #dy
self.st5.append([0., float(a[i]), 0.])
#
for i in range(len(a)): #dz
self.st5.append([0., 0., float(a[i])])
#
for i in range(len(a)):
i1=float(a[i])
for j in range(len(a)):
j1=float(a[j])
self.st5.append([j1, i1, 0.]) # dxdy
#
for i in range(len(a)):
i1=float(a[i])
for j in range(len(a)):
j1=float(a[j])
self.st5.append([j1, 0., i1,]) # dxdz
#
for i in range(len(a)):
i1=float(a[i])
for j in range(len(a)):
j1=float(a[j])
self.st5.append([0., j1, i1]) # dydz
self.masses = np.zeros(3)
self.vecs_cart = np.zeros((3,3))
self.vecs_frac = np.zeros((3,3))
self.vecs_n = np.zeros((3,3))
def __get__(self, obj, typ = None):
return self.masses
def __str__(self):
return '%.3f %.3f %.3f' % (self.masses[0], self.masses[1], self.masses[2])
__repr__ = __str__
##################################### Class Method #####################################################
def MAT_m_VEC(self, m, v):
p = [ 0.0 for i in range(len(v)) ]
for i in range(len(m)):
assert len(v) == len(m[i]), 'Length of the matrix row is not equal to the length of the vector'
p[i] = sum( [ m[i][j]*v[j] for j in range(len(v)) ] )
return p
def T(self, m):
p = [[ m[i][j] for i in range(len( m[j] )) ] for j in range(len( m )) ]
return p
def N(self, v):
max_ = 0.
for item in v:
if abs(item) > abs(max_): max_ = item
return [ item/max_ for item in v ]
def DET_3X3(self, m):
assert len(m) == 3, 'Matrix should be of the size 3 by 3'
return m[0][0]*m[1][1]*m[2][2] + m[1][0]*m[2][1]*m[0][2] + m[2][0]*m[0][1]*m[1][2] - \
m[0][2]*m[1][1]*m[2][0] - m[2][1]*m[1][2]*m[0][0] - m[2][2]*m[0][1]*m[1][0]
def SCALE_ADJOINT_3X3(self, m, s):
a = [[0.0 for i in range(3)] for j in range(3)]
a[0][0] = (s) * (m[1][1] * m[2][2] - m[1][2] * m[2][1])
a[1][0] = (s) * (m[1][2] * m[2][0] - m[1][0] * m[2][2])
a[2][0] = (s) * (m[1][0] * m[2][1] - m[1][1] * m[2][0])
a[0][1] = (s) * (m[0][2] * m[2][1] - m[0][1] * m[2][2])
a[1][1] = (s) * (m[0][0] * m[2][2] - m[0][2] * m[2][0])
a[2][1] = (s) * (m[0][1] * m[2][0] - m[0][0] * m[2][1])
a[0][2] = (s) * (m[0][1] * m[1][2] - m[0][2] * m[1][1])
a[1][2] = (s) * (m[0][2] * m[1][0] - m[0][0] * m[1][2])
a[2][2] = (s) * (m[0][0] * m[1][1] - m[0][1] * m[1][0])
return a
def INVERT_3X3(self, m):
tmp = 1.0/self.DET_3X3(m)
return self.SCALE_ADJOINT_3X3(m, tmp)
def IS_SYMMETRIC(self, m):
for i in range(len(m)):
for j in range(len(m[i])):
if m[i][j] != m[j][i]: return False # automatically checks square-shape
return True
def jacobi(self, ainput):
'''
Diagonalize a real symmetric matrix using the variable threshold cyclic Jacobi method.
'''
from math import sqrt
#
a = [[ ainput[i][j] for i in range(len( ainput[j] )) ] for j in range(len( ainput )) ] # copymatrix
n = len(a)
m = len(a[0])
if n != m:
            raise ValueError('jacobi: Matrix must be square')
#
for i in range(n):
for j in range(m):
if a[i][j] != a[j][i]:
                    raise ValueError('jacobi: Matrix must be symmetric')
#
tolmin = 1e-14
tol = 1e-4
#
v = [[0.0 for i in range(n)] for j in range(n)] # zeromatrix
for i in range(n):
v[i][i] = 1.0
#
maxd = 0.0
for i in range(n):
maxd = max(abs(a[i][i]),maxd)
#
for iter in range(50):
nrot = 0
for i in range(n):
for j in range(i+1,n):
aii = a[i][i]
ajj = a[j][j]
daij = abs(a[i][j])
if daij > tol*maxd: # Screen small elements
nrot = nrot + 1
s = aii - ajj
ds = abs(s)
if daij > (tolmin*ds): # Check for sufficient precision
if (tol*daij) > ds:
c = s = 1/sqrt(2.)
else:
t = a[i][j]/s
u = 0.25/sqrt(0.25+t*t)
c = sqrt(0.5+u)
s = 2.*t*u/c
#
for k in range(n):
u = a[i][k]
t = a[j][k]
a[i][k] = s*t + c*u
a[j][k] = c*t - s*u
#
for k in range(n):
u = a[k][i]
t = a[k][j]
a[k][i] = s*t + c*u
a[k][j]= c*t - s*u
#
for k in range(n):
u = v[i][k]
t = v[j][k]
v[i][k] = s*t + c*u
v[j][k] = c*t - s*u
#
a[j][i] = a[i][j] = 0.0
maxd = max(maxd,abs(a[i][i]),abs(a[j][j]))
#
if nrot == 0 and tol <= tolmin:
break
tol = max(tolmin,tol*0.99e-2)
#
if nrot != 0:
print('jacobi: [WARNING] Jacobi iteration did not converge in 50 passes!')
#
# Sort eigenvectors and values into increasing order
e = [0.0 for i in range(n)] # zerovector
for i in range(n):
e[i] = a[i][i]
for j in range(i):
if e[j] > e[i]:
(e[i],e[j]) = (e[j],e[i])
(v[i],v[j]) = (v[j],v[i])
#
return (v,e)
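    # Illustrative usage sketch (added for this edit, not part of the original source):
    # the Jacobi routine works on any real symmetric matrix, e.g.
    #     emc = EffectMass()
    #     vecs, vals = emc.jacobi([[2., 1., 0.], [1., 2., 0.], [0., 0., 3.]])
    # `vals` are the eigenvalues sorted in increasing order, `vecs` the matching eigenvectors.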
#
def cart2frac(self, basis, v):
return self.MAT_m_VEC( self.T(self.INVERT_3X3(basis)), v )
def fd_effmass_st3(self, e, h):
m = [[0.0 for i in range(3)] for j in range(3)]
m[0][0] = (e[1] - 2.0*e[0] + e[2])/h**2
m[1][1] = (e[3] - 2.0*e[0] + e[4])/h**2
m[2][2] = (e[5] - 2.0*e[0] + e[6])/h**2
m[0][1] = (e[7] + e[8] - e[9] - e[10])/(4.0*h**2)
m[0][2] = (e[11] + e[12] - e[13] - e[14])/(4.0*h**2)
m[1][2] = (e[15] + e[16] - e[17] - e[18])/(4.0*h**2)
# symmetrize
m[1][0] = m[0][1]
m[2][0] = m[0][2]
m[2][1] = m[1][2]
#
# print '-> fd_effmass_st3: Effective mass tensor:\n'
# for i in range(len(m)):
# print '%15.8f %15.8f %15.8f' % (m[i][0], m[i][1], m[i][2])
# print ''
# #
return m
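    # Note (added for clarity, not part of the original source): the three-point stencil
    # uses the standard central differences
    #     d2E/dx2  ~ (E(+h) - 2*E(0) + E(-h)) / h**2
    #     d2E/dxdy ~ (E(+h,+h) + E(-h,-h) - E(+h,-h) - E(-h,+h)) / (4*h**2)
    # applied to the band energies sampled on the displaced k-points of the stencil.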
def fd_effmass_st5(self, e, h):
m = [[0.0 for i in range(3)] for j in range(3)]
#
m[0][0] = (-(e[1]+e[4]) + 16.0*(e[2]+e[3]) - 30.0*e[0])/(12.0*h**2)
m[1][1] = (-(e[5]+e[8]) + 16.0*(e[6]+e[7]) - 30.0*e[0])/(12.0*h**2)
m[2][2] = (-(e[9]+e[12]) + 16.0*(e[10]+e[11]) - 30.0*e[0])/(12.0*h**2)
#
m[0][1] = (-63.0*(e[15]+e[20]+e[21]+e[26]) + 63.0*(e[14]+e[17]+e[27]+e[24]) \
+44.0*(e[16]+e[25]-e[13]-e[28]) + 74.0*(e[18]+e[23]-e[19]-e[22]))/(600.0*h**2)
m[0][2] = (-63.0*(e[31]+e[36]+e[37]+e[42]) + 63.0*(e[30]+e[33]+e[43]+e[40]) \
+44.0*(e[32]+e[41]-e[29]-e[44]) + 74.0*(e[34]+e[39]-e[35]-e[38]))/(600.0*h**2)
m[1][2] = (-63.0*(e[47]+e[52]+e[53]+e[58]) + 63.0*(e[46]+e[49]+e[59]+e[56]) \
+44.0*(e[48]+e[57]-e[45]-e[60]) + 74.0*(e[50]+e[55]-e[51]-e[54]))/(600.0*h**2)
#
# symmetrize
m[1][0] = m[0][1]
m[2][0] = m[0][2]
m[2][1] = m[1][2]
#
# print '-> fd_effmass_st5: Effective mass tensor:\n'
# for i in range(3):
# print '%15.8f %15.8f %15.8f' % (m[i][0], m[i][1], m[i][2])
# print ''
#
return m
def generate_kpoints(self, kpt_frac, st, h, prg, basis):
from math import pi
#
# working in the reciprocal space
m = self.INVERT_3X3(self.T(basis))
basis_r = [[ m[i][j]*2.0*pi for j in range(3) ] for i in range(3) ]
#
kpt_rec = self.MAT_m_VEC(self.T(basis_r), kpt_frac)
# print '-> generate_kpoints: K-point in reciprocal coordinates: %5.3f %5.3f %5.3f' % (kpt_rec[0], kpt_rec[1], kpt_rec[2])
#
if prg == 'V' or prg == 'P':
h = h*(1/EffectMass.Bohr) # [1/A]
#
kpoints = []
for i in range(len(st)):
k_c_ = [ kpt_rec[j] + st[i][j]*h for j in range(3) ] # getting displaced k points in Cartesian coordinates
k_f = self.cart2frac(basis_r, k_c_)
kpoints.append( [k_f[0], k_f[1], k_f[2]] )
#
return kpoints
def parse_bands_CASTEP(self, eigenval_fh, band, diff2_size, debug=False):
# Number of k-points X
nkpt = int(eigenval_fh.readline().strip().split()[3])
# Number of spin components X
spin_components = float(eigenval_fh.readline().strip().split()[4])
# Number of electrons X.00 Y.00
tmp = eigenval_fh.readline().strip().split()
if spin_components == 1:
nelec = int(float(tmp[3]))
n_electrons_down = None
elif spin_components == 2:
nelec = [float(tmp[3])]
n_electrons_down = int(float(tmp[4]))
# Number of eigenvalues X
nband = int(eigenval_fh.readline().strip().split()[3])
energies = []
# Get eigenenergies and unit cell from .bands file
while True:
line = eigenval_fh.readline()
if not line:
break
#
if 'Spin component 1' in line:
for i in range(1, nband + 1):
energy = float(eigenval_fh.readline().strip())
if band == i:
energies.append(energy)
return energies
def parse_EIGENVAL_VASP(self, eigenval_fh, band, diff2_size, debug=False):
ev2h = 1.0/27.21138505
eigenval_fh.seek(0) # just in case
eigenval_fh.readline()
eigenval_fh.readline()
eigenval_fh.readline()
eigenval_fh.readline()
eigenval_fh.readline()
#
nelec, nkpt, nband = [int(s) for s in eigenval_fh.readline().split()]
# if debug: print 'From EIGENVAL: Number of the valence band is %d (NELECT/2)' % (nelec/2)
if band > nband:
print('Requested band (%d) is larger than total number of the calculated bands (%d)!' % (band, nband))
sys.exit(1)
energies = []
for i in range(diff2_size):
eigenval_fh.readline() # empty line
eigenval_fh.readline() # k point coordinates
for j in range(1, nband+1):
line = eigenval_fh.readline()
if band == j:
energies.append(float(line.split()[1])*ev2h)
# if debug: print ''
return energies
#
def parse_nscf_PWSCF(self, eigenval_fh, band, diff2_size, debug=False):
ev2h = 1.0/27.21138505
eigenval_fh.seek(0) # just in case
engrs_at_k = []
energies = []
#
while True:
line = eigenval_fh.readline()
if not line:
break
#
if "End of band structure calculation" in line:
for i in range(diff2_size):
#
while True:
line = eigenval_fh.readline()
if "occupation numbers" in line:
break
#
if "k =" in line:
a = [] # energies at a k-point
eigenval_fh.readline() # empty line
#
while True:
line = eigenval_fh.readline()
if line.strip() == "": # empty line
break
#
a.extend(line.strip().split())
#
#print a
                        assert len(a) >= band, 'Length of the energies array at a k-point is smaller than the band parameter'
energies.append(float(a[band-1])*ev2h)
#
#print engrs_at_k
return energies
#
def parse_inpcar(self, inpcar_fh, debug=False):
import re
#
kpt = [] # k-point at which eff. mass in reciprocal reduced coords (3 floats)
stepsize = 0.0 # stepsize for finite difference (1 float) in Bohr
band = 0 # band for which eff. mass is computed (1 int)
prg = '' # program identifier (1 char)
basis = [] # basis vectors in cartesian coords (3x3 floats), units depend on the program identifier
#
inpcar_fh.seek(0) # just in case
p = re.search(r'^\s*(-*\d+\.\d+)\s+(-*\d+\.\d+)\s+(-*\d+\.\d+)', inpcar_fh.readline())
if p:
kpt = [float(p.group(1)), float(p.group(2)), float(p.group(3))]
if debug: print("Found k point in the reduced reciprocal space: %5.3f %5.3f %5.3f" % (kpt[0], kpt[1], kpt[2]))
else:
print("Was expecting k point on the line 0 (3 floats), didn't get it, exiting...")
sys.exit(1)
p = re.search(r'^\s*(\d+\.\d+)', inpcar_fh.readline())
if p:
stepsize = float(p.group(1))
if debug: print("Found stepsize of: %5.3f (1/Bohr)" % stepsize)
else:
print("Was expecting a stepsize on line 1 (1 float), didn't get it, exiting...")
sys.exit(1)
p = re.search(r'^\s*(\d+)', inpcar_fh.readline())
if p:
band = int(p.group(1))
if debug: print("Requested band is : %5d" % band)
else:
print("Was expecting band number on line 2 (1 int), didn't get it, exiting...")
sys.exit(1)
p = re.search(r'^\s*(\w)', inpcar_fh.readline())
if p:
prg = p.group(1)
if debug: print("Program identifier is: %5c" % prg)
else:
print("Was expecting program identifier on line 3 (1 char), didn't get it, exiting...")
sys.exit(1)
for i in range(3):
p = re.search(r'^\s*(-*\d+\.\d+)\s+(-*\d+\.\d+)\s+(-*\d+\.\d+)', inpcar_fh.readline())
if p:
basis.append([float(p.group(1)), float(p.group(2)), float(p.group(3))])
if debug:
print("Real space basis:")
for i in range(len(basis)):
print('%9.7f %9.7f %9.7f' % (basis[i][0], basis[i][1], basis[i][2]))
if debug: print('')
return kpt, stepsize, band, prg, basis
def get_eff_masses(self, m, basis):
#
vecs_cart = [[0.0 for i in range(3)] for j in range(3)]
vecs_frac = [[0.0 for i in range(3)] for j in range(3)]
vecs_n = [[0.0 for i in range(3)] for j in range(3)]
#
eigvec, eigval = self.jacobi(m)
#
for i in range(3):
vecs_cart[i] = eigvec[i]
vecs_frac[i] = self.cart2frac(basis, eigvec[i])
vecs_n[i] = self.N(vecs_frac[i])
#
em = [ 1.0/eigval[i] for i in range(len(eigval)) ]
return em, vecs_cart, vecs_frac, vecs_n
#
def cal_effmass(self, kpt, stepsize, band, prg, basis, output_fn):
if EffectMass.STENCIL == 3:
fd_effmass = self.fd_effmass_st3
st = self.st3
elif EffectMass.STENCIL == 5:
fd_effmass = self.fd_effmass_st5
st = self.st5
else:
print('main: [ERROR] Wrong value for STENCIL, should be 3 or 5.')
sys.exit(1)
#
#
try:
output_fh = open(output_fn, 'r')
except IOError:
sys.exit("Couldn't open input file "+output_fn+", exiting...\n")
#
if output_fn:
#
energies = []
if prg.upper() == 'V' or prg.upper() == 'C':
energies = self.parse_EIGENVAL_VASP(output_fh, band, len(st))
m = fd_effmass(energies, stepsize)
#
if prg.upper() == 'Q':
energies = self.parse_nscf_PWSCF(output_fh, band, len(st))
m = fd_effmass(energies, stepsize)
#
if prg.upper() == 'P':
energies = self.parse_bands_CASTEP(output_fh, band, len(st))
m = fd_effmass(energies, stepsize)
#
masses, vecs_cart, vecs_frac, vecs_n = self.get_eff_masses(m, basis)
self.vecs_cart = np.array(vecs_cart)
self.vecs_frac = np.array(vecs_frac)
self.vecs_n = np.array(vecs_n)
self.masses = np.array(masses)
#
        maxindx = np.argmax(np.abs(self.masses))
temp = 1.0
for i in np.arange(3):
if i == maxindx:
self.parallelmass = self.masses[i]
else:
temp = temp * self.masses[i]
self.verticalmass = np.sign(self.masses[0]) * np.sqrt(temp)
self.condeffmass = 3.0 / (1/self.masses[0] + 1/self.masses[1] + 1/self.masses[2])
self.doseffmass = np.sign(self.masses[0]) * np.abs(self.masses[0] * self.masses[1] * self.masses[2])**(1/3)
return
def get_kpointsfile(self, kpt, stepsize, prg, basis):
if EffectMass.STENCIL == 3:
st = self.st3
elif EffectMass.STENCIL == 5:
st = self.st5
else:
print('main: [ERROR] Wrong value for STENCIL, should be 3 or 5.')
sys.exit(1)
kpoints = self.generate_kpoints(kpt, st, stepsize, prg, basis)
kpoints_fh = open('KPOINTS', 'w')
kpoints_fh.write("generate with stepsize: "+str(stepsize)+"\n")
kpoints_fh.write("%d\n" % len(st))
kpoints_fh.write("Reciprocal\n")
#
        for kpt_disp in kpoints:
            kpoints_fh.write('%15.10f %15.10f %15.10f 0.01\n' % (kpt_disp[0], kpt_disp[1], kpt_disp[2]))
#
kpoints_fh.close()
return | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/myemc.py | myemc.py |
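# Illustrative usage sketch (added for this edit, not part of the original AICON source):
# the typical two-step effective-mass run reads INPCAR, writes the stencil KPOINTS file,
# and, once VASP has produced EIGENVAL on those k-points, evaluates the mass tensor.
# The file names below follow the conventions used above and are placeholder assumptions.
def _example_effective_mass(inpcar="INPCAR", eigenval="EIGENVAL"):
    emc = EffectMass()
    with open(inpcar, 'r') as fh:
        kpt, stepsize, band, prg, basis = emc.parse_inpcar(fh)
    emc.get_kpointsfile(kpt, stepsize, prg, basis)               # step 1: write KPOINTS
    emc.cal_effmass(kpt, stepsize, band, prg, basis, eigenval)   # step 2: parse EIGENVAL
    return emc.condeffmass, emc.doseffmass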
from datetime import datetime
from pymatgen.io.vasp.sets import MPRelaxSet, MPStaticSet
from fireworks import Firework, Workflow
from atomate.vasp.powerups import add_namefile
from aicon.myfireworks import MyOptimizeFW, MyStaticFW, MyNonSCFFW, MyDFPTFW, \
MyElasticFW, MyEffectivemassFW, CalElecCondFW, MyPhononFW, CalPhonCondFW
def wf_electron_conductivity(structure, vasp_input_set_relax=None, vasp_input_set_fixvol_relax=None, vasp_input_set_static=None, vasp_input_set_band=None,
vasp_input_set_diel=None, vasp_input_set_elastic=None, vasp_kpoint_set=None, vasp_cmd=">>vasp_cmd<<",
db_file=">>db_file<<", mode="standard", Temp=None, Doping=None, strain=None, ifSB=None):
'''This workflow aims to calculate electronic transport properties.'''
tag = datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S-%f')
fws = []
# get the input set for the optimization and update it if we passed custom settings
vis_relax = MPRelaxSet(structure, user_incar_settings=vasp_input_set_relax, user_kpoints_settings=vasp_kpoint_set)
# Structure optimization firework
fw_opt_equi = MyOptimizeFW(structure=structure, vasp_input_set=vis_relax, vasp_cmd=vasp_cmd,
db_file=db_file, name="equi structure optimization", count=1, spec={"_queueadapter": {"job_name": 'opt'}})
fws.append(fw_opt_equi) #1
# static calculations firework
fw_static_equi = MyStaticFW(structure=structure, vasp_input_set_params=vasp_input_set_static, prev_calc_loc="equi structure optimization-final",
vasp_cmd=vasp_cmd, db_file=db_file, name="equi static", parents=fws[0], spec={"_queueadapter": {"job_name": 'static'}})
fws.append(fw_static_equi) #2
    # Structure optimization fireworks for the 0.5% and 1.0% strained structures
fw_opt_05 = MyOptimizeFW(structure=structure, vasp_input_set_params=vasp_input_set_fixvol_relax, strain=strain[0], prev_calc_loc="equi structure optimization-final",
vasp_cmd=vasp_cmd, db_file=db_file, name="0.5per structure optimization", count=1, parents=fws[0], spec={"_queueadapter": {"job_name": 'opt'}})
fw_opt_10 = MyOptimizeFW(structure=structure, vasp_input_set_params=vasp_input_set_fixvol_relax, strain=strain[1], prev_calc_loc="equi structure optimization-final",
vasp_cmd=vasp_cmd, db_file=db_file, name="1.0per structure optimization", count=1, parents=fws[0], spec={"_queueadapter": {"job_name": 'opt'}})
fws.append(fw_opt_05) #3
fws.append(fw_opt_10) #4
fw_static_05 = MyStaticFW(structure=structure, vasp_input_set_params=vasp_input_set_static, prev_calc_loc="0.5per structure optimization-final",
vasp_cmd=vasp_cmd, db_file=db_file, name="0.5per static", parents=fws[2], spec={"_queueadapter": {"job_name": 'static'}})
fw_static_10 = MyStaticFW(structure=structure, vasp_input_set_params=vasp_input_set_static, prev_calc_loc="1.0per structure optimization-final",
vasp_cmd=vasp_cmd, db_file=db_file, name="1.0per static", parents=fws[3], spec={"_queueadapter": {"job_name": 'static'}})
fws.append(fw_static_05) #5
fws.append(fw_static_10) #6
# band structure calculation firework
fw_band_equi = MyNonSCFFW(structure=structure, vasp_input_set_params=vasp_input_set_band, prev_calc_loc="equi static",
name="equi nscf", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[1], spec={"_queueadapter": {"job_name": 'band'}})
fw_band_05 = MyNonSCFFW(structure=structure, vasp_input_set_params=vasp_input_set_band, prev_calc_loc="0.5per static",
name="0.5per nscf", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[4], spec={"_queueadapter": {"job_name": 'band'}})
fw_band_10 = MyNonSCFFW(structure=structure, vasp_input_set_params=vasp_input_set_band, prev_calc_loc="1.0per static",
name="1.0per nscf", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[5], spec={"_queueadapter": {"job_name": 'band'}})
fws.append(fw_band_equi) #7
fws.append(fw_band_05) #8
fws.append(fw_band_10) #9
# elastic constant calculation
fw_elastic = MyElasticFW(structure=structure, vasp_input_set_params=vasp_input_set_elastic, prev_calc_loc="equi structure optimization-final",
name="elastic", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[1], spec={"_queueadapter": {"job_name": 'elastic'}})
fws.append(fw_elastic) #10
# dielect constant calculation
fw_dielect = MyDFPTFW(structure=structure, user_incar_settings=vasp_input_set_diel, lepsilon=True, prev_calc_loc="equi static",
name="dielectric", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[1], spec={"_queueadapter": {"job_name": 'dielect'}})
fws.append(fw_dielect) #11
# effective mass
fw_effectivemass_CBM = MyEffectivemassFW(structure=structure, vasp_input_set_params=vasp_input_set_band, prev_calc_loc="equi static", whichbnd="CBM", stepsize=0.01,
name="CBM", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[6], spec={"_queueadapter": {"job_name": 'CBM'}})
fw_effectivemass_VBM = MyEffectivemassFW(structure=structure, vasp_input_set_params=vasp_input_set_band, prev_calc_loc="equi static", whichbnd="VBM", stepsize=0.01,
name="VBM", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[6], spec={"_queueadapter": {"job_name": 'VBM'}})
fw_effectivemass_CSB = MyEffectivemassFW(structure=structure, vasp_input_set_params=vasp_input_set_band, prev_calc_loc="equi static", whichbnd="CSB", stepsize=0.01,
name="CSB", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[6], spec={"_queueadapter": {"job_name": 'CSB'}})
fw_effectivemass_VSB = MyEffectivemassFW(structure=structure, vasp_input_set_params=vasp_input_set_band, prev_calc_loc="equi static", whichbnd="VSB", stepsize=0.01,
name="VSB", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[6], spec={"_queueadapter": {"job_name": 'VSB'}})
fws.append(fw_effectivemass_CBM) #12
fws.append(fw_effectivemass_VBM) #13
fws.append(fw_effectivemass_CSB) #14
fws.append(fw_effectivemass_VSB) #15
# Call AICON
fw_eleccond = CalElecCondFW(structure=structure, name="electrical conductivity", mode=mode, Temp=Temp, Doping=Doping, ifSB=ifSB,
db_file=db_file, parents=fws[6:15], spec={"_queueadapter": {"job_name": 'AICON'}})
fws.append(fw_eleccond) #16
# finally, create the workflow
wf_electron_conductivity = Workflow(fws)
wf_electron_conductivity.name = "{}:{}".format(structure.composition.reduced_formula, "electronic transport properties")
return add_namefile(wf_electron_conductivity)
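# Illustrative usage sketch (added for this edit, not part of the original AICON source):
# a workflow could be built from a structure file and submitted to a FireWorks LaunchPad
# roughly as follows; the temperature/doping lists, the strain pair and the ifSB flag are
# placeholder assumptions that would normally come from the AICON input file.
def _example_submit_elec_workflow(poscar="POSCAR"):
    from fireworks import LaunchPad
    from pymatgen.core import Structure
    struct = Structure.from_file(poscar)
    wf = wf_electron_conductivity(struct, mode="standard",
                                  Temp=[300, 400, 500], Doping=[1e18, 1e19, 1e20],
                                  strain=[0.005, 0.01], ifSB=True)
    lpad = LaunchPad.auto_load()   # reads the my_launchpad.yaml configuration
    lpad.add_wf(wf)
    return wf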
def wf_phonon_conductivity(structure, vasp_input_set_relax=None, vasp_input_set_fixvol_relax=None, vasp_input_set_dfpt=None, vasp_kpoint_set=None,
vasp_cmd=">>vasp_cmd<<", db_file=">>db_file<<", Temp=None, ifscale=None, supercell=None):
""" This workflow aims to calculate lattice thermal conductivity of the structure. """
fws = []
# get the input set for the optimization and update it if we passed custom settings
vis_relax = MPRelaxSet(structure, user_incar_settings=vasp_input_set_relax, user_kpoints_settings=vasp_kpoint_set)
# Structure optimization firework
fw_opt_orig = MyOptimizeFW(structure=structure, vasp_input_set=vis_relax, vasp_cmd=vasp_cmd,
db_file=db_file, name="equi structure optimization", count=1, spec={"_queueadapter": {"job_name": 'opt'}})
fws.append(fw_opt_orig) #1
# Structure optimization firework for 0.4% smaller and 0.4% larger structures
fw_opt_minus = MyOptimizeFW(structure=structure, vasp_input_set_params=vasp_input_set_fixvol_relax, strain=-0.004, prev_calc_loc="equi structure optimization-final",
vasp_cmd=vasp_cmd, db_file=db_file, name="minus structure optimization", count=1, parents=fws[0], spec={"_queueadapter": {"job_name": 'opt'}})
fw_opt_plus = MyOptimizeFW(structure=structure, vasp_input_set_params=vasp_input_set_fixvol_relax, strain=0.004, prev_calc_loc="equi structure optimization-final",
vasp_cmd=vasp_cmd, db_file=db_file, name="plus structure optimization", count=1, parents=fws[0], spec={"_queueadapter": {"job_name": 'opt'}})
fws.append(fw_opt_minus) #2
fws.append(fw_opt_plus) #3
# 2nd Force Constant calculation using DFPT
fw_dfpt_orig = MyPhononFW(structure=structure, user_incar_settings=vasp_input_set_dfpt, prev_calc_loc="equi structure optimization-final", supercell=supercell,
name="orig phonon band", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[0], spec={"_queueadapter": {"job_name": 'dfpt', "ppnode": 8}})
fw_dfpt_minus = MyPhononFW(structure=structure, user_incar_settings=vasp_input_set_dfpt, prev_calc_loc="minus structure optimization-final", supercell=supercell,
name="minus phonon band", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[1], spec={"_queueadapter": {"job_name": 'dfpt', "ppnode": 8}})
fw_dfpt_plus = MyPhononFW(structure=structure, user_incar_settings=vasp_input_set_dfpt, prev_calc_loc="plus structure optimization-final", supercell=supercell,
name="plus phonon band", vasp_cmd=vasp_cmd, db_file=db_file, parents=fws[2], spec={"_queueadapter": {"job_name": 'dfpt', "ppnode": 8}})
fws.append(fw_dfpt_orig) #4
fws.append(fw_dfpt_minus) #5
fws.append(fw_dfpt_plus) #6
# get band.yaml and gruneisen.yaml
fw_phoncond = CalPhonCondFW(structure=structure, Temp=Temp, ifscale=ifscale, supercell=supercell, name="thermal conductivity",
db_file=db_file, parents=fws[3:6], spec={"_queueadapter": {"job_name": 'AICON'}})
fws.append(fw_phoncond)
wf_phonon_conductivity = Workflow(fws)
wf_phonon_conductivity.name = "{}:{}".format(structure.composition.reduced_formula, "phonon transport properties")
return add_namefile(wf_phonon_conductivity) | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/myworkflow.py | myworkflow.py |
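# Illustrative usage note (added for this edit, not part of the original AICON source):
# the phonon workflow is submitted the same way, e.g.
#     wf = wf_phonon_conductivity(struct, Temp=[300], ifscale=False, supercell=[2, 2, 2])
#     LaunchPad.auto_load().add_wf(wf)
# where the temperature list, the scaling flag and the supercell size are placeholder assumptions.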
from atomate.vasp.config import HALF_KPOINTS_FIRST_RELAX, RELAX_MAX_FORCE, \
VASP_CMD, DB_FILE
from fireworks import Firework
from pymatgen.core import Structure
from pymatgen.io.vasp.sets import MPRelaxSet, MPStaticSet
from atomate.common.firetasks.glue_tasks import PassCalcLocs
from atomate.vasp.firetasks.glue_tasks import CopyVaspOutputs, pass_vasp_result
from atomate.vasp.firetasks.parse_outputs import VaspToDb
from atomate.vasp.firetasks.run_calc import RunVaspCustodian, RunVaspDirect
from atomate.vasp.firetasks.write_inputs import WriteVaspFromIOSet
from aicon.myfiretasks import CheckOptimization, WriteVaspStaticFromPrev, WriteVaspNSCFFromPrev, \
WriteEMCInput, WriteVaspForDeformedCrystal, BuildAICONDir, RunAICONForElec, WritePhononBand, \
BuildPhonopyDir, RunAICONForPhon, WriteSupercellWithDisp
class MyOptimizeFW(Firework):
def __init__(self, structure, name="structure optimization",
vasp_input_set=None, vasp_input_set_params=None, count=1,
vasp_cmd=VASP_CMD,
ediffg=None, db_file=DB_FILE,
prev_calc_loc = False, strain=0.0,
force_gamma=True, job_type="normal",
max_force_threshold=RELAX_MAX_FORCE,
auto_npar=">>auto_npar<<",
half_kpts_first_relax=HALF_KPOINTS_FIRST_RELAX, parents=None,
**kwargs):
"""
Optimize the given structure.
Args:
structure (Structure): Input structure. Note that for prev_calc_loc jobs, the structure
is only used to set the name of the FW and any structure with the same composition
can be used.
name (str): Name for the Firework.
vasp_input_set (VaspInputSet): input set to use. Defaults to MPRelaxSet() if None.
vasp_input_set_params (dict): Parameters in INCAR to override.
count (int): A counter to record round of structure optimization.
vasp_cmd (str): Command to run vasp.
ediffg (float): Shortcut to set ediffg in certain jobs
db_file (str): Path to file specifying db credentials to place output parsing.
prev_calc_loc (bool or str): If true, copies outputs from previous calc. If
a str value, retrieves a previous calculation output by name. If False/None, will create
new static calculation using the provided structure.
strain (float): strain executed on structure in each direction of lattice.
force_gamma (bool): Force gamma centered kpoint generation
            job_type (str): custodian job type (default "normal")
max_force_threshold (float): max force on a site allowed at end; otherwise, reject job
auto_npar (bool or str): whether to set auto_npar. defaults to env_chk: ">>auto_npar<<"
half_kpts_first_relax (bool): whether to use half the kpoints for the first relaxation
parents ([Firework]): Parents of this particular Firework.
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
t = []
if parents and prev_calc_loc:
t.append(CopyVaspOutputs(calc_loc=prev_calc_loc, contcar_to_poscar=True))
t.append(WriteVaspForDeformedCrystal(strain=strain, user_incar_settings=vasp_input_set_params))
else:
t.append(WriteVaspFromIOSet(structure=structure, vasp_input_set=vasp_input_set))
t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, job_type=job_type, auto_npar=auto_npar, gzip_output=False))
t.append(CheckOptimization(vasp_input_set=vasp_input_set, vasp_input_set_params=vasp_input_set_params,
vasp_cmd=vasp_cmd, db_file=db_file, name=name, count=count, kwargs=kwargs))
super(MyOptimizeFW, self).__init__(t, parents=parents, name="{}-{}-{}".
format(structure.composition.reduced_formula, name, str(count)),
**kwargs)
class MyStaticFW(Firework):
def __init__(self, structure=None, name="static", vasp_input_set=None, vasp_input_set_params=None, vasp_kpoint_set=None,
vasp_cmd=VASP_CMD, prev_calc_loc=True, prev_calc_dir=None, db_file=DB_FILE, vasptodb_kwargs=None,
parents=None, **kwargs):
"""
Standard static calculation Firework - either from a previous location or from a structure.
Args:
structure (Structure): Input structure. Note that for prev_calc_loc jobs, the structure
is only used to set the name of the FW and any structure with the same composition
can be used.
name (str): Name for the Firework.
vasp_input_set (VaspInputSet): Input set to use (for jobs w/no parents)
Defaults to MPStaticSet() if None.
vasp_input_set_params (dict): Parameters in INCAR to override.
vasp_kpoint_set: (Kpoint): Kpoint set to use.
vasp_cmd (str): Command to run vasp.
prev_calc_loc (bool or str): If true (default), copies outputs from previous calc. If
a str value, retrieves a previous calculation output by name. If False/None, will create
new static calculation using the provided structure.
prev_calc_dir (str): Path to a previous calculation to copy from
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework. FW or list of FWS.
vasptodb_kwargs (dict): kwargs to pass to VaspToDb
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
t = []
vasp_input_set_params = vasp_input_set_params or {}
vasptodb_kwargs = vasptodb_kwargs or {}
if "additional_fields" not in vasptodb_kwargs:
vasptodb_kwargs["additional_fields"] = {}
vasptodb_kwargs["additional_fields"]["task_label"] = name
fw_name = "{}-{}".format(structure.composition.reduced_formula if structure else "unknown", name)
if prev_calc_dir:
t.append(CopyVaspOutputs(calc_dir=prev_calc_dir, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(user_incar_settings=vasp_input_set_params, standardize=True))
elif parents:
if prev_calc_loc:
t.append(CopyVaspOutputs(calc_loc=prev_calc_loc, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(user_incar_settings=vasp_input_set_params, standardize=True))
elif structure:
vasp_input_set = vasp_input_set or MPStaticSet(structure)
t.append(WriteVaspFromIOSet(structure=structure,
vasp_input_set=vasp_input_set,
vasp_input_params=vasp_input_set_params))
else:
raise ValueError("Must specify structure or previous calculation")
t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, auto_npar=">>auto_npar<<", gzip_output=False))
t.append(PassCalcLocs(name=name))
super(MyStaticFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
class MyNonSCFFW(Firework):
def __init__(self, parents=None, prev_calc_dir=None, structure=None,
name="nscf", mode="line", vasp_cmd=VASP_CMD,
prev_calc_loc=True, db_file=DB_FILE,
vasp_input_set_params=None, **kwargs):
"""
Standard NonSCF Calculation Firework supporting uniform and line modes.
Args:
structure (Structure): Input structure - used only to set the name
of the FW.
name (str): Name for the Firework.
mode (str): "uniform" or "line" mode.
vasp_cmd (str): Command to run vasp.
prev_calc_loc (bool or str): Whether to copy outputs from previous run. Defaults to True.
prev_calc_dir (str): Path to a previous calculation to copy from
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework.
FW or list of FWS.
vasp_input_set_params (dict): Parameters in INCAR to override.
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
vasp_input_set_params = vasp_input_set_params or {}
fw_name = "{}-{} {}".format(structure.composition.reduced_formula if
structure else "unknown", name, mode)
t = []
if prev_calc_dir:
t.append(CopyVaspOutputs(calc_dir=prev_calc_dir,
additional_files=["CHGCAR"]))
elif parents:
t.append(CopyVaspOutputs(calc_loc=prev_calc_loc,
additional_files=["CHGCAR"]))
else:
raise ValueError("Must specify previous calculation for NonSCFFW")
mode = mode.lower()
if mode == "uniform":
t.append(WriteVaspNSCFFromPrev(prev_calc_dir=".", mode="uniform", user_incar_settings=vasp_input_set_params))
else:
t.append(WriteVaspNSCFFromPrev(prev_calc_dir=".", mode="line", user_incar_settings=vasp_input_set_params))
t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, auto_npar=">>auto_npar<<", gzip_output=False))
t.append(PassCalcLocs(name=name))
super(MyNonSCFFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
class MyDFPTFW(Firework):
def __init__(self, structure=None, prev_calc_dir=None, name="dielectric", vasp_cmd=VASP_CMD,
prev_calc_loc=True, lepsilon=True,
db_file=DB_FILE, parents=None, user_incar_settings=None, user_kpoints_settings=None,
pass_nm_results=False, **kwargs):
"""
Static DFPT calculation Firework
Args:
structure (Structure): Input structure. If prev_calc_loc, used only to set the
name of the FW.
name (str): Name for the Firework.
lepsilon (bool): Turn on LEPSILON to calculate polar properties
vasp_cmd (str): Command to run vasp.
prev_calc_loc (str or bool): Whether to copy outputs from previous run. Defaults to True.
prev_calc_dir (str): Path to a previous calculation to copy from
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework.
FW or list of FWS.
user_incar_settings (dict): Parameters in INCAR to override
user_kpoints_settings (Kpoint): Kpoint set to use.
            pass_nm_results (bool): if True, the normal-mode eigenvalues and eigenvectors are passed on
                so that the next Firework can use them.
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
name = "dielectric" if lepsilon else "phonon"
fw_name = "{}-{}".format(structure.composition.reduced_formula if structure else "unknown", name)
user_incar_settings = user_incar_settings or {}
t = []
if prev_calc_dir:
t.append(CopyVaspOutputs(calc_dir=prev_calc_dir, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(lepsilon=lepsilon, user_incar_settings=user_incar_settings))
elif parents and prev_calc_loc:
t.append(CopyVaspOutputs(calc_loc=prev_calc_loc, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(lepsilon=lepsilon, user_incar_settings=user_incar_settings))
elif structure:
vasp_input_set = MPStaticSet(structure, lepsilon=lepsilon, user_kpoints_settings=user_kpoints_settings,
user_incar_settings=user_incar_settings)
t.append(WriteVaspFromIOSet(structure=structure, vasp_input_set=vasp_input_set))
else:
raise ValueError("Must specify structure or previous calculation")
t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, gzip_output=False))
if pass_nm_results:
t.append(pass_vasp_result({"structure": "a>>final_structure",
"eigenvals": "a>>normalmode_eigenvals",
"eigenvecs": "a>>normalmode_eigenvecs"},
parse_eigen=True,
mod_spec_key="normalmodes"))
t.append(PassCalcLocs(name=name))
super(MyDFPTFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
class MyElasticFW(Firework):
def __init__(self, structure=None, name="elastic", vasp_input_set=None, vasp_input_set_params=None, vasp_kpoint_set=None,
vasp_cmd=VASP_CMD, prev_calc_loc=True, prev_calc_dir=None, db_file=DB_FILE, vasptodb_kwargs=None,
parents=None, **kwargs):
"""
Elastic tensor calculation Firework - either from a previous location or from a structure.
Args:
structure (Structure): Input structure. Note that for prev_calc_loc jobs, the structure
is only used to set the name of the FW and any structure with the same composition
can be used.
name (str): Name for the Firework.
            vasp_input_set (VaspInputSet): input set to use (for jobs w/no parents). Defaults to MPStaticSet() if None.
vasp_input_set_params (dict): Parameters in INCAR to override.
vasp_cmd (str): Command to run vasp.
prev_calc_loc (bool or str): If true (default), copies outputs from previous calc. If
a str value, retrieves a previous calculation output by name. If False/None, will create
new static calculation using the provided structure.
prev_calc_dir (str): Path to a previous calculation to copy from
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework. FW or list of FWS.
vasptodb_kwargs (dict): kwargs to pass to VaspToDb
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
t = []
vasp_input_set_params = vasp_input_set_params or {}
vasptodb_kwargs = vasptodb_kwargs or {}
if "additional_fields" not in vasptodb_kwargs:
vasptodb_kwargs["additional_fields"] = {}
vasptodb_kwargs["additional_fields"]["task_label"] = name
fw_name = "{}-{}".format(structure.composition.reduced_formula if structure else "unknown", name)
if prev_calc_dir:
t.append(CopyVaspOutputs(calc_dir=prev_calc_dir, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(user_incar_settings=vasp_input_set_params))
elif parents:
if prev_calc_loc:
t.append(CopyVaspOutputs(calc_loc=prev_calc_loc, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(user_incar_settings=vasp_input_set_params))
elif structure:
vasp_input_set = vasp_input_set or MPStaticSet(structure)
t.append(WriteVaspFromIOSet(structure=structure,
vasp_input_set=vasp_input_set,
vasp_input_params=vasp_input_set_params))
else:
raise ValueError("Must specify structure or previous calculation")
t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, auto_npar=">>auto_npar<<", gzip_output=False))
t.append(PassCalcLocs(name=name))
super(MyElasticFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
class MyEffectivemassFW(Firework):
def __init__(self, parents=None, prev_calc_dir=None, structure=None,
name="effective mass", mode="uniform", whichbnd="CBM", stepsize=0.01,
                 vasp_cmd=VASP_CMD, prev_calc_loc=True, db_file=DB_FILE,
vasp_input_set_params=None, **kwargs):
"""
Modified NonSCF Calculation Firework supporting uniform and line modes.
Args:
structure (Structure): Input structure - used only to set the name of the FW.
name (str): Name for the Firework.
mode (str): "uniform" or "line" mode.
            whichbnd (str): which band extremum to calculate (CBM, VBM, CSB, or VSB).
stepsize (float): stepsize for finite difference in Bohr. Default is 0.01.
vasp_cmd (str): Command to run vasp.
prev_calc_loc (bool or str): Whether to copy outputs from previous run. Defaults to True.
prev_calc_dir (str): Path to a previous calculation to copy from
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework. FW or list of FWS.
vasp_input_set_params (dict): Parameters in INCAR to override.
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
vasp_input_set_params = vasp_input_set_params or {}
fw_name = "{}-{} {}".format(structure.composition.reduced_formula if
structure else "unknown", name, whichbnd)
t = []
if prev_calc_dir:
t.append(CopyVaspOutputs(calc_dir=prev_calc_dir,
additional_files=["CHGCAR"]))
elif parents and prev_calc_loc:
t.append(CopyVaspOutputs(calc_loc=prev_calc_loc,
additional_files=["CHGCAR"]))
else:
raise ValueError("Must specify previous calculation for MyEffectivemassFW")
mode = mode.lower()
t.append(WriteVaspNSCFFromPrev(prev_calc_dir=".", mode="line", user_incar_settings=vasp_input_set_params))
# write INPCAR and KPOINTS
t.append(WriteEMCInput(bnd_name=whichbnd, calc_loc='equi nscf', step_size=stepsize))
t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, auto_npar=">>auto_npar<<", gzip_output=False))
t.append(PassCalcLocs(name=whichbnd))
super(MyEffectivemassFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
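# Note on the task order in MyEffectivemassFW: the NSCF input is always written
# in line mode, after which WriteEMCInput overwrites KPOINTS with the finite-
# difference stencil centred on the chosen band extremum taken from the
# "equi nscf" calculation, so the VASP run that follows samples exactly the
# k-points needed for the effective-mass fit. PassCalcLocs then stores the
# result under `whichbnd` (CBM/VBM/CSB/VSB) so that BuildAICONDir can collect it.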
class CalElecCondFW(Firework):
def __init__(self, structure=None, name="electrical conductivity", db_file=DB_FILE,
parents=None, mode=None, Temp=None, Doping=None, ifSB=None, **kwargs):
"""
        Electrical conductivity calculation Firework.
Args:
structure (Structure): Input structure, used only to set the name of the FW.
name (str): Name for the Firework.
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework. FW or list of FWS.
mode (str): AICON mode, either standard or doping.
Temp (list): Temperature value array.
Doping (list): Doping value array.
            ifSB (bool): whether to consider the second band's contribution.
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
fw_name = "{}-{}".format(structure.composition.reduced_formula if structure else "unknown", name)
t = []
t.append(BuildAICONDir())
t.append(RunAICONForElec(mode=mode, Temp=Temp, Doping=Doping, ifSB=ifSB))
t.append(PassCalcLocs(name=name))
super(CalElecCondFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
class MyPhononFW(Firework):
def __init__(self, structure=None, prev_calc_dir=None, name="phonon band", vasp_cmd=VASP_CMD,
prev_calc_loc=True, db_file=DB_FILE, parents=None, user_incar_settings=None,
user_kpoints_settings=None, supercell=None, **kwargs):
"""
Phonon calculation Firework using DFPT
Args:
structure (Structure): Input structure. If prev_calc_loc, used only to set the
name of the FW.
name (str): Name for the Firework.
vasp_cmd (str): Command to run vasp.
prev_calc_loc (str or bool): Whether to copy outputs from previous run. Defaults to True.
prev_calc_dir (str): Path to a previous calculation to copy from
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework.
FW or list of FWS.
user_incar_settings (dict): Parameters in INCAR to override
user_kpoints_settings (Kpoint): Kpoint set to use.
            supercell (list): size of the supercell.
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
fw_name = "{}-{}".format(structure.composition.reduced_formula if structure else "unknown", name)
t = []
if prev_calc_dir:
t.append(CopyVaspOutputs(calc_dir=prev_calc_dir, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(user_incar_settings=user_incar_settings, standardize=False, supercell=supercell))
elif parents and prev_calc_loc:
t.append(CopyVaspOutputs(calc_loc=prev_calc_loc, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(user_incar_settings=user_incar_settings, standardize=False, supercell=supercell))
elif structure:
vasp_input_set = MPStaticSet(structure, user_kpoints_settings=user_kpoints_settings,
user_incar_settings=user_incar_settings)
t.append(WriteVaspFromIOSet(structure=structure, vasp_input_set=vasp_input_set))
else:
raise ValueError("Must specify structure or previous calculation")
t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, gzip_output=False))
t.append(WritePhononBand(supercell=supercell))
t.append(PassCalcLocs(name=name))
super(MyPhononFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
class CalPhonCondFW(Firework):
def __init__(self, structure=None, name="thermal conductivity", db_file=DB_FILE,
parents=None, Temp=None, ifscale=None, supercell=None, **kwargs):
"""
        Lattice thermal conductivity calculation Firework.
Args:
structure (Structure): Input structure, used only to set the name of the FW.
name (str): Name for the Firework.
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework. FW or list of FWS.
Temp (list): Temperature value array.
            ifscale (bool): whether to multiply Kappa by a scaling factor.
supercell (list): size of supercell.
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
fw_name = "{}-{}".format(structure.composition.reduced_formula if structure else "unknown", name)
t = []
t.append(BuildPhonopyDir(supercell=supercell))
t.append(RunAICONForPhon(Temp=Temp, ifscale=ifscale))
t.append(PassCalcLocs(name=name))
super(CalPhonCondFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
class MyPhononFiniteDiffFW(Firework):
def __init__(self, structure=None, prev_calc_dir=None, name="phonon band", vasp_cmd=VASP_CMD,
prev_calc_loc=True, db_file=DB_FILE, parents=None, user_incar_settings=None,
user_kpoints_settings=None, supercell=None, **kwargs):
"""
Phonon calculation Firework using finite difference method
Args:
structure (Structure): Input structure. If prev_calc_loc, used only to set the
name of the FW.
name (str): Name for the Firework.
vasp_cmd (str): Command to run vasp.
prev_calc_loc (str or bool): Whether to copy outputs from previous run. Defaults to True.
prev_calc_dir (str): Path to a previous calculation to copy from
db_file (str): Path to file specifying db credentials.
parents (Firework): Parents of this particular Firework.
FW or list of FWS.
user_incar_settings (dict): Parameters in INCAR to override
user_kpoints_settings (Kpoint): Kpoint set to use.
            supercell (list): size of the supercell.
\*\*kwargs: Other kwargs that are passed to Firework.__init__.
"""
fw_name = "{}-{}".format(structure.composition.reduced_formula if structure else "unknown", name)
t = []
if prev_calc_dir:
t.append(CopyVaspOutputs(calc_dir=prev_calc_dir, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(user_incar_settings=user_incar_settings, standardize=False, supercell=supercell))
elif parents and prev_calc_loc:
t.append(CopyVaspOutputs(calc_loc=prev_calc_loc, contcar_to_poscar=True))
t.append(WriteVaspStaticFromPrev(user_incar_settings=user_incar_settings, standardize=False, supercell=supercell))
elif structure:
vasp_input_set = MPStaticSet(structure, user_kpoints_settings=user_kpoints_settings,
user_incar_settings=user_incar_settings)
t.append(WriteVaspFromIOSet(structure=structure, vasp_input_set=vasp_input_set))
else:
raise ValueError("Must specify structure or previous calculation")
t.append(WriteSupercellWithDisp(supercell=supercell))
t.append(PassCalcLocs(name=name))
        super(MyPhononFiniteDiffFW, self).__init__(t, parents=parents, name=fw_name, **kwargs)
import numpy as np
from numpy.linalg import inv
import pymatgen as pmg
from pymatgen.io.vasp import Outcar
class ElasticConst(object):
''' Elastic constant class. '''
def __init__(self):
self.value = 0.0
self.tensor = list()
def __get__(self, obj, typ = None):
return self.value
def __str__(self):
return '%.3f' % (self.value)
__repr__ = __str__
def Get_ElasConst(self, filepath):
outcar = Outcar(filepath + "OUTCAR")
outcar.read_elastic_tensor()
self.tensor = np.array(outcar.data["elastic_tensor"]) / 10 #unit in GPa
def Get_AvgLongEConst(self, filepath):
self.Get_ElasConst(filepath)
self.value_2 = self.tensor[0,1] + 2*self.tensor[3,3] + 3/5 * (self.tensor[0,0] - self.tensor[0,1] - 2*self.tensor[3,3])
self.Stensor = inv(self.tensor)
struct = pmg.core.Structure.from_file(filepath + "POSCAR")
self.density = struct.density * 1e3 #kg/m^3
B_v = ((self.tensor[0,0] + self.tensor[1,1] + self.tensor[2,2]) + 2 * (self.tensor[0,1] + self.tensor[1,2] + self.tensor[2,0])) / 9
G_v = ((self.tensor[0,0] + self.tensor[1,1] + self.tensor[2,2]) - (self.tensor[0,1] + self.tensor[1,2] + self.tensor[2,0]) \
+ 3 * (self.tensor[3,3] + self.tensor[4,4] + self.tensor[5,5])) / 15
B_r = 1 / ((self.Stensor[0,0] + self.Stensor[1,1] + self.Stensor[2,2]) + 2 * (self.Stensor[0,1] + self.Stensor[1,2] + self.Stensor[2,0]))
G_r = 15 / (4*(self.Stensor[0,0] + self.Stensor[1,1] + self.Stensor[2,2]) - 4*(self.Stensor[0,1] + self.Stensor[1,2] + self.Stensor[2,0]) \
+ 3 * (self.Stensor[3,3] + self.Stensor[4,4] + self.Stensor[5,5]))
B_h = (B_v + B_r) / 2
G_h = (G_v + G_r) / 2
self.vel_long = ((B_h + 4/3 * G_h) * 1e9 / self.density)**(1/2)
self.vel_tran = (G_h * 1e9 / self.density)**(1/2)
self.vel_mean = 3 / (1/self.vel_long + 2/self.vel_tran)
self.value = self.vel_mean**2 * self.density / 1e9 | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/elastic.py | elastic.py |
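# --- Illustrative usage (a sketch, not part of the AICON API) -------------------
# Get_AvgLongEConst() expects `filepath` to end with a path separator because it
# concatenates "OUTCAR" and "POSCAR" directly; "./elastic/" is an assumed
# directory holding a finished elastic-tensor run (e.g. IBRION = 6).
def _example_elastic(filepath="./elastic/"):
    elas = ElasticConst()
    elas.Get_AvgLongEConst(filepath)
    # vel_long / vel_tran are the Hill-averaged longitudinal and transverse
    # sound velocities in m/s; value is density * vel_mean**2 expressed in GPa.
    return elas.vel_long, elas.vel_tran, elas.vel_mean, elas.value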
import os
import numpy as np
from pymatgen.io.vasp import BSVasprun
from pymatgen.electronic_structure.core import Spin
from aicon.band import Band
import scipy.constants
import pandas as pd
EBoltzm = scipy.constants.physical_constants['Boltzmann constant in eV/K'][0]
C_e = scipy.constants.e
def Get_Electron(filepath, Temp, dope, mode, ifSB):
    '''Calculate electron transport properties and write the results to output files.'''
Compound = Electron()
Compound.Get_bandstru(filepath + "equi/")
Compound.Get_values(filepath, Temp, dope, mode, ifSB=ifSB)
Compound.Output(Temp, dope, mode)
def find_mu_doping(band, doping, mu, Temp):
'''
Find the reduced chemical potential corresponding to the given carrier concentration.
    For carriers from a single band.
Parameters
----------
band: Band object
    doping: list
        The specified carrier concentrations, in units of cm^-3.
    mu: list
        The list of chemical potentials to search over, in eV.
    Temp: list
        The list of temperatures, in K.
    Returns
    -------
    mu_x: ndarray
        The reduced chemical potentials corresponding to the given doping levels and temperatures, with shape (len(Temp), len(doping)).
'''
mu_x = np.zeros((len(Temp), len(doping)))
delta = np.zeros((len(doping), len(mu)))
for i, T in enumerate(Temp):
X = mu / (EBoltzm * T)
for j, x in enumerate(X):
N = band.Density(x, T) / 1e6
for k, n in enumerate(doping):
delta[k,j] = N - n
delta = np.abs(delta)
mu_x[i] = np.array([X[ind] for ind in np.argmin(delta, axis=1)])
return mu_x
def find_mu_doping2(band1, band2, doping, mu, Temp):
'''
Find the reduced chemical potential corresponding to the given carrier concentration.
    For carriers from two bands.
Parameters
----------
    band1, band2: Band objects
    doping: list
        The specified carrier concentrations, in units of cm^-3.
    mu: list
        The list of chemical potentials to search over, in eV.
    Temp: list
        The list of temperatures, in K.
    Returns
    -------
    mu_x: ndarray
        The reduced chemical potentials corresponding to the given doping levels and temperatures, with shape (len(Temp), len(doping)).
'''
mu_x = np.zeros((len(Temp), len(doping)))
delta = np.zeros((len(doping), len(mu)))
for i, T in enumerate(Temp):
X = mu / (EBoltzm * T)
gap = (band2.bandgap - band1.bandgap)/(EBoltzm*T)
for j, x in enumerate(X):
N_1 = band1.Density(x, T) / 1e6
N_2 = band2.Density(x - gap, T) / 1e6
for k, n in enumerate(doping):
delta[k,j] = N_1 + N_2 - n
delta = np.abs(delta)
mu_x[i] = np.array([X[ind] for ind in np.argmin(delta, axis=1)])
return mu_x
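# --- Minimal sketch of the grid search above (hypothetical stand-in band) -------
# Both helpers scan the mu grid, evaluate the carrier density of each band at
# the reduced potential x = mu/(kB*T), and keep the x that minimises
# |N(x, T) - n_target| for every doping level. The fake band below only
# illustrates the mechanics; it is not a physical model.
def _example_find_mu_doping():
    class _FakeBand:
        def Density(self, x, T):
            return 1e25 * np.exp(x)   # toy monotonic carrier density in m^-3
    doping = [1e18, 1e19]             # cm^-3
    mu = np.arange(0.0, 0.5, 0.002)   # eV
    Temp = [300, 600]                 # K
    return find_mu_doping(_FakeBand(), doping, mu, Temp)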
class Electron(object):
'''Electronic transport properties calculation class'''
def Get_bandstru(self, filepath):
'''Obtain band structure'''
vaspband = BSVasprun(filepath + "/vasprun.xml")
self.engband = vaspband.get_band_structure(kpoints_filename=filepath+"/KPOINTS",line_mode=True)
self.bandgap = self.engband.get_band_gap()['energy']
def Get_CBM(self):
'''Find CBM and instantiate a Band object'''
coord = self.engband.get_cbm()['kpoint'].frac_coords
deg = self.engband.get_kpoint_degeneracy(coord)
cbmbnd = np.min(self.engband.get_cbm()['band_index'][Spin.up])
cbmkpt = self.engband.get_cbm()['kpoint_index'][0]
self.CBM = Band(self.bandgap, deg, isCBM = True, bndindex=cbmbnd, kptindex=cbmkpt)
def Get_VBM(self):
'''Find VBM and instantiate a Band object'''
coord = self.engband.get_vbm()['kpoint'].frac_coords
deg = self.engband.get_kpoint_degeneracy(coord)
vbmbnd = np.max(self.engband.get_vbm()['band_index'][Spin.up])
vbmkpt = self.engband.get_vbm()['kpoint_index'][0]
self.VBM = Band(self.bandgap, deg, isVBM = True, bndindex=vbmbnd, kptindex=vbmkpt)
def Get_SB(self):
'''Find CSB and VSB, then instantiate a Band object'''
CSB_list = list()
VSB_list = list()
cbmbnd = np.min(self.engband.get_cbm()['band_index'][Spin.up])
vbmbnd = np.max(self.engband.get_vbm()['band_index'][Spin.up])
cbmeng = self.engband.get_cbm()['energy']
vbmeng = self.engband.get_vbm()['energy']
for i in np.arange(1, len(self.engband.kpoints)-1):
if (self.engband.kpoints[i].frac_coords == self.engband.kpoints[i-1].frac_coords).all() or (self.engband.kpoints[i].frac_coords == self.engband.kpoints[i+1].frac_coords).all():
if (self.engband.bands[Spin.up][cbmbnd,i] <= self.engband.bands[Spin.up][cbmbnd,i-2]) and (self.engband.bands[Spin.up][cbmbnd,i] <= self.engband.bands[Spin.up][cbmbnd,i+2]) and (np.abs(cbmeng - self.engband.bands[Spin.up][cbmbnd,i]) < 0.2):
if i not in self.engband.get_cbm()['kpoint_index']:
CSB_list.append(i)
if (self.engband.bands[Spin.up][vbmbnd,i] >= self.engband.bands[Spin.up][vbmbnd,i-2]) and (self.engband.bands[Spin.up][vbmbnd,i] >= self.engband.bands[Spin.up][vbmbnd,i+2]) and (np.abs(vbmeng - self.engband.bands[Spin.up][vbmbnd,i]) < 0.2):
if i not in self.engband.get_vbm()['kpoint_index']:
VSB_list.append(i)
else:
if (self.engband.bands[Spin.up][cbmbnd,i] <= self.engband.bands[Spin.up][cbmbnd,i-1]) and (self.engband.bands[Spin.up][cbmbnd,i] <= self.engband.bands[Spin.up][cbmbnd,i+1]) and (np.abs(cbmeng - self.engband.bands[Spin.up][cbmbnd,i]) < 0.2):
if i not in self.engband.get_cbm()['kpoint_index']:
CSB_list.append(i)
if (self.engband.bands[Spin.up][vbmbnd,i] >= self.engband.bands[Spin.up][vbmbnd,i-1]) and (self.engband.bands[Spin.up][vbmbnd,i] >= self.engband.bands[Spin.up][vbmbnd,i+1]) and (np.abs(vbmeng - self.engband.bands[Spin.up][vbmbnd,i]) < 0.2):
if i not in self.engband.get_vbm()['kpoint_index']:
VSB_list.append(i)
CSB = None
VSB = None
if len(CSB_list) != 0:
CSB = CSB_list[0]
for i in np.arange(1, len(CSB_list)):
if self.engband.bands[Spin.up][cbmbnd,CSB_list[i]] < self.engband.bands[Spin.up][cbmbnd,CSB]:
CSB = CSB_list[i]
if len(VSB_list) != 0:
VSB = VSB_list[0]
for i in np.arange(1, len(VSB_list)):
if self.engband.bands[Spin.up][vbmbnd,VSB_list[i]] > self.engband.bands[Spin.up][vbmbnd,VSB]:
VSB = VSB_list[i]
if CSB is not None:
coord = self.engband.kpoints[CSB].frac_coords
deg = self.engband.get_kpoint_degeneracy(coord)
self.CSB = Band(self.bandgap + np.abs(self.engband.bands[Spin.up][cbmbnd,CSB] - cbmeng), deg, isCSB = True, bndindex=cbmbnd, kptindex=CSB)
if VSB is not None:
coord = self.engband.kpoints[VSB].frac_coords
deg = self.engband.get_kpoint_degeneracy(coord)
self.VSB = Band(self.bandgap + np.abs(self.engband.bands[Spin.up][vbmbnd,VSB] - vbmeng), deg, isVSB = True, bndindex=vbmbnd, kptindex=VSB)
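    # Note on Get_SB(): a k-point qualifies as a secondary band extremum
    # (CSB/VSB) when it is a local minimum (conduction) or local maximum
    # (valence) along the band path, lies within 0.2 eV of the corresponding
    # band edge, and is not one of the CBM/VBM k-points themselves. Duplicated
    # k-points at segment junctions are handled by comparing i-2/i+2 instead
    # of i-1/i+1, and the lowest CSB / highest VSB candidate is kept.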
def Get_values(self, filepath, Temp, doping, mode, ifSB=True):
'''
Calculate electronic transport properties. The results are either a function of chemical potential and temperature or a function of
carrier concentration and temperature.
        Parameters
        ----------
        filepath: str
            Path to the root directory containing the AICON calculation results.
Temp: list
The specified temperatures, the unit is K.
doping: list
The specified carrier concentration, the unit is cm-3. Used in 'doping' mode.
mode: str
either 'standard' or 'doping'. In standard mode, the results are a function of chemical potential and temperature, while in
doping mode, the results are a function of specified carrier concentration and temperature.
        ifSB: bool
            whether the secondary bands (CSB or VSB) are included in the calculation. Users may choose not to consider them
            even if they exist. The default value is True.
The results are stored in the self.data attribute.
'''
self.data = dict()
if mode == 'standard':
            mu = np.arange(-self.bandgap/2, -self.bandgap/2 + 1.0, 0.002)  # from the middle of the gap up to 1.0 eV above it, with a step size of 0.002 eV
self.mu = mu
self.Get_CBM()
self.Get_VBM()
if ifSB:
self.Get_SB()
mux_array=list()
for T in Temp:
temporary = [ele/(EBoltzm*T) for ele in mu]
mux_array.append(temporary)
mux_array = np.array(mux_array)
(CBM_AcoRelaxT, CBM_OptRelaxT, CBM_ImpRelaxT, CBM_TotalRelaxT, CBM_Density, CBM_Mobility,\
CBM_Elcond, CBM_Seebeck, CBM_Lorenz, CBM_Ekappa, CBM_Hallfactor, CBM_Hallcoeff, CBM_PF) = self.CBM.Get_transport_para(filepath, mux_array, Temp)
(VBM_AcoRelaxT, VBM_OptRelaxT, VBM_ImpRelaxT, VBM_TotalRelaxT, VBM_Density, VBM_Mobility,\
VBM_Elcond, VBM_Seebeck, VBM_Lorenz, VBM_Ekappa, VBM_Hallfactor, VBM_Hallcoeff, VBM_PF) = self.VBM.Get_transport_para(filepath, mux_array, Temp)
self.data['CBM'] = {'TotalRelaxT':CBM_TotalRelaxT, 'AcoRelaxT':CBM_AcoRelaxT, 'OptRelaxT':CBM_OptRelaxT, 'ImpRelaxT':CBM_ImpRelaxT, \
'Mobility':CBM_Mobility, 'Density':CBM_Density, 'Elcond':CBM_Elcond, 'Seebeck':CBM_Seebeck, 'Lorenz':CBM_Lorenz, \
'Ekappa':CBM_Ekappa, 'Hallcoeff':CBM_Hallcoeff, 'PF':CBM_PF}
self.data['VBM'] = {'TotalRelaxT':VBM_TotalRelaxT, 'AcoRelaxT':VBM_AcoRelaxT, 'OptRelaxT':VBM_OptRelaxT, 'ImpRelaxT':VBM_ImpRelaxT, \
'Mobility':VBM_Mobility, 'Density':VBM_Density, 'Elcond':VBM_Elcond, 'Seebeck':VBM_Seebeck, 'Lorenz':VBM_Lorenz, \
'Ekappa':VBM_Ekappa, 'Hallcoeff':VBM_Hallcoeff, 'PF':VBM_PF}
if hasattr(self, 'CSB'):
cmux_array=list()
gap_csb = self.CSB.bandgap - self.CBM.bandgap
for i, T in enumerate(Temp):
temporary = mux_array[i,:] - gap_csb/(EBoltzm*T)
cmux_array.append(temporary)
cmux_array = np.array(cmux_array)
(CSB_AcoRelaxT, CSB_OptRelaxT, CSB_ImpRelaxT, CSB_TotalRelaxT, CSB_Density, CSB_Mobility,\
CSB_Elcond, CSB_Seebeck, CSB_Lorenz, CSB_Ekappa, CSB_Hallfactor, CSB_Hallcoeff, CSB_PF) = self.CSB.Get_transport_para(filepath, cmux_array, Temp)
TCB_Density = CBM_Density + CSB_Density
TCB_Elcond = CBM_Elcond + CSB_Elcond
TCB_Mobility = TCB_Elcond / (C_e * TCB_Density)
TCB_Seebeck = (CBM_Seebeck * CBM_Elcond + CSB_Seebeck * CSB_Elcond ) / TCB_Elcond
TCB_Lorenz = (CBM_Lorenz * CBM_Elcond + CSB_Lorenz * CSB_Elcond) / TCB_Elcond
TCB_Ekappa = np.array([TCB_Lorenz[i] * TCB_Elcond[i] * T for i, T in enumerate(Temp)])
TCB_Hallcoeff = (CBM_Hallfactor * CBM_Mobility * CBM_Elcond + CSB_Hallfactor * CSB_Mobility * CSB_Elcond) / (TCB_Elcond)**2
TCB_PF = TCB_Seebeck**2 * TCB_Elcond
self.data['CSB'] = {'TotalRelaxT':CSB_TotalRelaxT, 'AcoRelaxT':CSB_AcoRelaxT, 'OptRelaxT':CSB_OptRelaxT, 'ImpRelaxT':CSB_ImpRelaxT, \
'Mobility':CSB_Mobility, 'Density':CSB_Density, 'Elcond':CSB_Elcond, 'Seebeck':CSB_Seebeck, 'Lorenz':CSB_Lorenz, \
'Ekappa':CSB_Ekappa, 'Hallcoeff':CSB_Hallcoeff, 'PF':CSB_PF}
self.data['TCB'] = {'Mobility':TCB_Mobility, 'Density':TCB_Density, 'Elcond':TCB_Elcond, 'Seebeck':TCB_Seebeck, 'Lorenz':TCB_Lorenz, \
'Ekappa':TCB_Ekappa, 'Hallcoeff':TCB_Hallcoeff, 'PF':TCB_PF}
if hasattr(self, 'VSB'):
vmux_array=list()
gap_vsb = self.VSB.bandgap - self.VBM.bandgap
for i, T in enumerate(Temp):
temporary = mux_array[i,:] - gap_vsb/(EBoltzm*T)
vmux_array.append(temporary)
vmux_array = np.array(vmux_array)
(VSB_AcoRelaxT, VSB_OptRelaxT, VSB_ImpRelaxT, VSB_TotalRelaxT, VSB_Density, VSB_Mobility,\
VSB_Elcond, VSB_Seebeck, VSB_Lorenz, VSB_Ekappa, VSB_Hallfactor, VSB_Hallcoeff, VSB_PF) = self.VSB.Get_transport_para(filepath, vmux_array, Temp)
TVB_Density = VBM_Density + VSB_Density
TVB_Elcond = VBM_Elcond + VSB_Elcond
TVB_Mobility = TVB_Elcond / (C_e * TVB_Density)
TVB_Seebeck = (VBM_Seebeck * VBM_Elcond + VSB_Seebeck * VSB_Elcond ) / TVB_Elcond
TVB_Lorenz = (VBM_Lorenz * VBM_Elcond + VSB_Lorenz * VSB_Elcond) / TVB_Elcond
TVB_Ekappa = np.array([TVB_Lorenz[i] * TVB_Elcond[i] * T for i, T in enumerate(Temp)])
TVB_Hallcoeff = (VBM_Hallfactor * VBM_Mobility * VBM_Elcond + VSB_Hallfactor * VSB_Mobility * VSB_Elcond) / (TVB_Elcond)**2
TVB_PF = TVB_Seebeck**2 * TVB_Elcond
self.data['VSB'] = {'TotalRelaxT':VSB_TotalRelaxT, 'AcoRelaxT':VSB_AcoRelaxT, 'OptRelaxT':VSB_OptRelaxT, 'ImpRelaxT':VSB_ImpRelaxT, \
'Mobility':VSB_Mobility, 'Density':VSB_Density, 'Elcond':VSB_Elcond, 'Seebeck':VSB_Seebeck, 'Lorenz':VSB_Lorenz, \
'Ekappa':VSB_Ekappa, 'Hallcoeff':VSB_Hallcoeff, 'PF':VSB_PF}
self.data['TVB'] = {'Mobility':TVB_Mobility, 'Density':TVB_Density, 'Elcond':TVB_Elcond, 'Seebeck':TVB_Seebeck, 'Lorenz':TVB_Lorenz, \
'Ekappa':TVB_Ekappa, 'Hallcoeff':TVB_Hallcoeff, 'PF':TVB_PF}
if mode == 'doping':
mu = np.arange(-self.bandgap/2, -self.bandgap/2 + 1.0, 0.0005)
self.Get_CBM()
self.Get_VBM()
if ifSB:
self.Get_SB()
self.CBM.Get_carridensity(filepath)
self.VBM.Get_carridensity(filepath)
if hasattr(self, 'CSB'):
gap_csb = self.CSB.bandgap - self.CBM.bandgap
self.CSB.Get_carridensity(filepath)
cmux_array = find_mu_doping2(self.CBM, self.CSB, doping, mu, Temp)
csmux_array=list()
for i, T in enumerate(Temp):
temporary = cmux_array[i,:] - gap_csb/(EBoltzm*T)
csmux_array.append(temporary)
csmux_array = np.array(csmux_array)
(CBM_AcoRelaxT, CBM_OptRelaxT, CBM_ImpRelaxT, CBM_TotalRelaxT, CBM_Density, CBM_Mobility,\
CBM_Elcond, CBM_Seebeck, CBM_Lorenz, CBM_Ekappa, CBM_Hallfactor, CBM_Hallcoeff, CBM_PF) = self.CBM.Get_transport_para(filepath, cmux_array, Temp)
(CSB_AcoRelaxT, CSB_OptRelaxT, CSB_ImpRelaxT, CSB_TotalRelaxT, CSB_Density, CSB_Mobility,\
CSB_Elcond, CSB_Seebeck, CSB_Lorenz, CSB_Ekappa, CSB_Hallfactor, CSB_Hallcoeff, CSB_PF) = self.CSB.Get_transport_para(filepath, csmux_array, Temp)
TCB_Density = CBM_Density + CSB_Density
TCB_Elcond = CBM_Elcond + CSB_Elcond
TCB_Mobility = TCB_Elcond / (C_e * TCB_Density)
TCB_Seebeck = (CBM_Seebeck * CBM_Elcond + CSB_Seebeck * CSB_Elcond ) / TCB_Elcond
TCB_Lorenz = (CBM_Lorenz * CBM_Elcond + CSB_Lorenz * CSB_Elcond) / TCB_Elcond
TCB_Ekappa = np.array([TCB_Lorenz[i] * TCB_Elcond[i] * T for i, T in enumerate(Temp)])
TCB_Hallcoeff = (CBM_Hallfactor * CBM_Mobility * CBM_Elcond + CSB_Hallfactor * CSB_Mobility * CSB_Elcond) / (TCB_Elcond)**2
TCB_PF = TCB_Seebeck**2 * TCB_Elcond
self.data['CBM'] = {'TotalRelaxT':CBM_TotalRelaxT, 'AcoRelaxT':CBM_AcoRelaxT, 'OptRelaxT':CBM_OptRelaxT, 'ImpRelaxT':CBM_ImpRelaxT, \
'Mobility':CBM_Mobility, 'Density':CBM_Density, 'Elcond':CBM_Elcond, 'Seebeck':CBM_Seebeck, 'Lorenz':CBM_Lorenz, \
'Ekappa':CBM_Ekappa, 'Hallcoeff':CBM_Hallcoeff, 'PF':CBM_PF}
self.data['CSB'] = {'TotalRelaxT':CSB_TotalRelaxT, 'AcoRelaxT':CSB_AcoRelaxT, 'OptRelaxT':CSB_OptRelaxT, 'ImpRelaxT':CSB_ImpRelaxT, \
'Mobility':CSB_Mobility, 'Density':CSB_Density, 'Elcond':CSB_Elcond, 'Seebeck':CSB_Seebeck, 'Lorenz':CSB_Lorenz, \
'Ekappa':CSB_Ekappa, 'Hallcoeff':CSB_Hallcoeff, 'PF':CSB_PF}
self.data['TCB'] = {'Mobility':TCB_Mobility, 'Density':TCB_Density, 'Elcond':TCB_Elcond, 'Seebeck':TCB_Seebeck, 'Lorenz':TCB_Lorenz, \
'Ekappa':TCB_Ekappa, 'Hallcoeff':TCB_Hallcoeff, 'PF':TCB_PF}
else:
cmux_array = find_mu_doping(self.CBM, doping, mu, Temp)
(CBM_AcoRelaxT, CBM_OptRelaxT, CBM_ImpRelaxT, CBM_TotalRelaxT, CBM_Density, CBM_Mobility,\
CBM_Elcond, CBM_Seebeck, CBM_Lorenz, CBM_Ekappa, CBM_Hallfactor, CBM_Hallcoeff, CBM_PF) = self.CBM.Get_transport_para(filepath, cmux_array, Temp)
self.data['CBM'] = {'TotalRelaxT':CBM_TotalRelaxT, 'AcoRelaxT':CBM_AcoRelaxT, 'OptRelaxT':CBM_OptRelaxT, 'ImpRelaxT':CBM_ImpRelaxT, \
'Mobility':CBM_Mobility, 'Density':CBM_Density, 'Elcond':CBM_Elcond, 'Seebeck':CBM_Seebeck, 'Lorenz':CBM_Lorenz, \
'Ekappa':CBM_Ekappa, 'Hallcoeff':CBM_Hallcoeff, 'PF':CBM_PF}
if hasattr(self, 'VSB'):
gap_vsb = self.VSB.bandgap - self.VBM.bandgap
self.VSB.Get_carridensity(filepath)
vmux_array = find_mu_doping2(self.VBM, self.VSB, doping, mu, Temp)
vsmux_array=list()
for i, T in enumerate(Temp):
temporary = vmux_array[i,:] - gap_vsb/(EBoltzm*T)
vsmux_array.append(temporary)
vsmux_array = np.array(vsmux_array)
(VBM_AcoRelaxT, VBM_OptRelaxT, VBM_ImpRelaxT, VBM_TotalRelaxT, VBM_Density, VBM_Mobility,\
VBM_Elcond, VBM_Seebeck, VBM_Lorenz, VBM_Ekappa, VBM_Hallfactor, VBM_Hallcoeff, VBM_PF) = self.VBM.Get_transport_para(filepath, vmux_array, Temp)
(VSB_AcoRelaxT, VSB_OptRelaxT, VSB_ImpRelaxT, VSB_TotalRelaxT, VSB_Density, VSB_Mobility,\
VSB_Elcond, VSB_Seebeck, VSB_Lorenz, VSB_Ekappa, VSB_Hallfactor, VSB_Hallcoeff, VSB_PF) = self.VSB.Get_transport_para(filepath, vsmux_array, Temp)
TVB_Density = VBM_Density + VSB_Density
TVB_Elcond = VBM_Elcond + VSB_Elcond
TVB_Mobility = TVB_Elcond / (C_e * TVB_Density)
TVB_Seebeck = (VBM_Seebeck * VBM_Elcond + VSB_Seebeck * VSB_Elcond ) / TVB_Elcond
TVB_Lorenz = (VBM_Lorenz * VBM_Elcond + VSB_Lorenz * VSB_Elcond) / TVB_Elcond
TVB_Ekappa = np.array([TVB_Lorenz[i] * TVB_Elcond[i] * T for i, T in enumerate(Temp)])
TVB_Hallcoeff = (VBM_Hallfactor * VBM_Mobility * VBM_Elcond + VSB_Hallfactor * VSB_Mobility * VSB_Elcond) / (TVB_Elcond)**2
TVB_PF = TVB_Seebeck**2 * TVB_Elcond
self.data['VBM'] = {'TotalRelaxT':VBM_TotalRelaxT, 'AcoRelaxT':VBM_AcoRelaxT, 'OptRelaxT':VBM_OptRelaxT, 'ImpRelaxT':VBM_ImpRelaxT, \
'Mobility':VBM_Mobility, 'Density':VBM_Density, 'Elcond':VBM_Elcond, 'Seebeck':VBM_Seebeck, 'Lorenz':VBM_Lorenz, \
'Ekappa':VBM_Ekappa, 'Hallcoeff':VBM_Hallcoeff, 'PF':VBM_PF}
self.data['VSB'] = {'TotalRelaxT':VSB_TotalRelaxT, 'AcoRelaxT':VSB_AcoRelaxT, 'OptRelaxT':VSB_OptRelaxT, 'ImpRelaxT':VSB_ImpRelaxT, \
'Mobility':VSB_Mobility, 'Density':VSB_Density, 'Elcond':VSB_Elcond, 'Seebeck':VSB_Seebeck, 'Lorenz':VSB_Lorenz, \
'Ekappa':VSB_Ekappa, 'Hallcoeff':VSB_Hallcoeff, 'PF':VSB_PF}
self.data['TVB'] = {'Mobility':TVB_Mobility, 'Density':TVB_Density, 'Elcond':TVB_Elcond, 'Seebeck':TVB_Seebeck, 'Lorenz':TVB_Lorenz, \
'Ekappa':TVB_Ekappa, 'Hallcoeff':TVB_Hallcoeff, 'PF':TVB_PF}
else:
vmux_array = find_mu_doping(self.VBM, doping, mu, Temp)
(VBM_AcoRelaxT, VBM_OptRelaxT, VBM_ImpRelaxT, VBM_TotalRelaxT, VBM_Density, VBM_Mobility,\
VBM_Elcond, VBM_Seebeck, VBM_Lorenz, VBM_Ekappa, VBM_Hallfactor, VBM_Hallcoeff, VBM_PF) = self.VBM.Get_transport_para(filepath, vmux_array, Temp)
self.data['VBM'] = {'TotalRelaxT':VBM_TotalRelaxT, 'AcoRelaxT':VBM_AcoRelaxT, 'OptRelaxT':VBM_OptRelaxT, 'ImpRelaxT':VBM_ImpRelaxT, \
'Mobility':VBM_Mobility, 'Density':VBM_Density, 'Elcond':VBM_Elcond, 'Seebeck':VBM_Seebeck, 'Lorenz':VBM_Lorenz, \
'Ekappa':VBM_Ekappa, 'Hallcoeff':VBM_Hallcoeff, 'PF':VBM_PF}
#####################################################################################################################################################################
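    # Layout of self.data filled in by Get_values(): the top-level keys are
    # 'CBM' and 'VBM', plus 'CSB'/'TCB' and 'VSB'/'TVB' when secondary bands
    # are present. Each entry maps a property name ('Seebeck', 'Elcond',
    # 'Mobility', ...) to a 2-D array indexed as [temperature, mu or doping
    # point]; Output() below flattens these arrays into one row per point.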
def Output(self, Temp, doping, mode):
'''
        Output the results in whatever file format users want. The results are first converted to a pandas.DataFrame object, so they can be stored
        in any file format pandas supports. The key parameters for each band are also written to the Parameters file for checking.
'''
shape = np.shape(self.data['CBM']['TotalRelaxT'])
Temp_list = np.array([])
if mode == 'standard':
mu_list = np.array([])
if mode == 'doping':
dope_list= np.array([])
CBM_density_list = np.array([])
CBM_elcond_list = np.array([])
CBM_seebeck_list = np.array([])
CBM_mobility_list = np.array([])
CBM_lorenz_list = np.array([])
CBM_ekappa_list = np.array([])
CBM_hallcoeff_list = np.array([])
CBM_pf_list = np.array([])
CBM_totalrelaxtime_list = np.array([])
CBM_acorelaxtime_list = np.array([])
CBM_optrelaxtime_list = np.array([])
CBM_imprelaxtime_list = np.array([])
VBM_density_list = np.array([])
VBM_elcond_list = np.array([])
VBM_seebeck_list = np.array([])
VBM_mobility_list = np.array([])
VBM_lorenz_list = np.array([])
VBM_ekappa_list = np.array([])
VBM_hallcoeff_list = np.array([])
VBM_pf_list = np.array([])
VBM_totalrelaxtime_list = np.array([])
VBM_acorelaxtime_list = np.array([])
VBM_optrelaxtime_list = np.array([])
VBM_imprelaxtime_list = np.array([])
if hasattr(self, 'CSB'):
CSB_density_list = np.array([])
CSB_elcond_list = np.array([])
CSB_seebeck_list = np.array([])
CSB_mobility_list = np.array([])
CSB_lorenz_list = np.array([])
CSB_ekappa_list = np.array([])
CSB_hallcoeff_list = np.array([])
CSB_pf_list = np.array([])
CSB_totalrelaxtime_list = np.array([])
CSB_acorelaxtime_list = np.array([])
CSB_optrelaxtime_list = np.array([])
CSB_imprelaxtime_list = np.array([])
TCB_density_list = np.array([])
TCB_elcond_list = np.array([])
TCB_seebeck_list = np.array([])
TCB_mobility_list = np.array([])
TCB_lorenz_list = np.array([])
TCB_ekappa_list = np.array([])
TCB_hallcoeff_list = np.array([])
TCB_pf_list = np.array([])
if hasattr(self, 'VSB'):
VSB_density_list = np.array([])
VSB_elcond_list = np.array([])
VSB_seebeck_list = np.array([])
VSB_mobility_list = np.array([])
VSB_lorenz_list = np.array([])
VSB_ekappa_list = np.array([])
VSB_hallcoeff_list = np.array([])
VSB_pf_list = np.array([])
VSB_totalrelaxtime_list = np.array([])
VSB_acorelaxtime_list = np.array([])
VSB_optrelaxtime_list = np.array([])
VSB_imprelaxtime_list = np.array([])
TVB_density_list = np.array([])
TVB_elcond_list = np.array([])
TVB_seebeck_list = np.array([])
TVB_mobility_list = np.array([])
TVB_lorenz_list = np.array([])
TVB_ekappa_list = np.array([])
TVB_hallcoeff_list = np.array([])
TVB_pf_list = np.array([])
for i in Temp:
Temp_list=np.concatenate((Temp_list, np.repeat(i, shape[1])))
if mode == 'doping':
dope_list=np.concatenate((dope_list, doping))
if mode == 'standard':
mu_list=np.concatenate((mu_list, self.mu))
for i in np.arange(shape[0]):
CBM_density_list=np.concatenate((CBM_density_list, self.data['CBM']['Density'][i,:]))
CBM_seebeck_list=np.concatenate((CBM_seebeck_list, self.data['CBM']['Seebeck'][i,:]))
CBM_mobility_list = np.concatenate((CBM_mobility_list, self.data['CBM']['Mobility'][i,:]))
CBM_elcond_list=np.concatenate((CBM_elcond_list, self.data['CBM']['Elcond'][i,:]))
CBM_lorenz_list=np.concatenate((CBM_lorenz_list, self.data['CBM']['Lorenz'][i,:]))
CBM_ekappa_list=np.concatenate((CBM_ekappa_list, self.data['CBM']['Ekappa'][i,:]))
CBM_hallcoeff_list=np.concatenate((CBM_hallcoeff_list, self.data['CBM']['Hallcoeff'][i,:]))
CBM_pf_list = np.concatenate((CBM_pf_list, self.data['CBM']['PF'][i,:]))
CBM_totalrelaxtime_list = np.concatenate((CBM_totalrelaxtime_list, self.data['CBM']['TotalRelaxT'][i,:]))
CBM_acorelaxtime_list = np.concatenate((CBM_acorelaxtime_list, self.data['CBM']['AcoRelaxT'][i,:]))
CBM_optrelaxtime_list = np.concatenate((CBM_optrelaxtime_list, self.data['CBM']['OptRelaxT'][i,:]))
CBM_imprelaxtime_list = np.concatenate((CBM_imprelaxtime_list, self.data['CBM']['ImpRelaxT'][i,:]))
VBM_density_list=np.concatenate((VBM_density_list, self.data['VBM']['Density'][i,:]))
VBM_seebeck_list=np.concatenate((VBM_seebeck_list, self.data['VBM']['Seebeck'][i,:]))
VBM_mobility_list = np.concatenate((VBM_mobility_list, self.data['VBM']['Mobility'][i,:]))
VBM_elcond_list=np.concatenate((VBM_elcond_list, self.data['VBM']['Elcond'][i,:]))
VBM_lorenz_list=np.concatenate((VBM_lorenz_list, self.data['VBM']['Lorenz'][i,:]))
VBM_ekappa_list=np.concatenate((VBM_ekappa_list, self.data['VBM']['Ekappa'][i,:]))
VBM_hallcoeff_list=np.concatenate((VBM_hallcoeff_list, self.data['VBM']['Hallcoeff'][i,:]))
VBM_pf_list = np.concatenate((VBM_pf_list, self.data['VBM']['PF'][i,:]))
VBM_totalrelaxtime_list = np.concatenate((VBM_totalrelaxtime_list, self.data['VBM']['TotalRelaxT'][i,:]))
VBM_acorelaxtime_list = np.concatenate((VBM_acorelaxtime_list, self.data['VBM']['AcoRelaxT'][i,:]))
VBM_optrelaxtime_list = np.concatenate((VBM_optrelaxtime_list, self.data['VBM']['OptRelaxT'][i,:]))
VBM_imprelaxtime_list = np.concatenate((VBM_imprelaxtime_list, self.data['VBM']['ImpRelaxT'][i,:]))
if hasattr(self, 'CSB'):
CSB_density_list=np.concatenate((CSB_density_list, self.data['CSB']['Density'][i,:]))
CSB_seebeck_list=np.concatenate((CSB_seebeck_list, self.data['CSB']['Seebeck'][i,:]))
CSB_mobility_list = np.concatenate((CSB_mobility_list, self.data['CSB']['Mobility'][i,:]))
CSB_elcond_list=np.concatenate((CSB_elcond_list, self.data['CSB']['Elcond'][i,:]))
CSB_lorenz_list=np.concatenate((CSB_lorenz_list, self.data['CSB']['Lorenz'][i,:]))
CSB_ekappa_list=np.concatenate((CSB_ekappa_list, self.data['CSB']['Ekappa'][i,:]))
CSB_hallcoeff_list=np.concatenate((CSB_hallcoeff_list, self.data['CSB']['Hallcoeff'][i,:]))
CSB_pf_list = np.concatenate((CSB_pf_list, self.data['CSB']['PF'][i,:]))
CSB_totalrelaxtime_list = np.concatenate((CSB_totalrelaxtime_list, self.data['CSB']['TotalRelaxT'][i,:]))
CSB_acorelaxtime_list = np.concatenate((CSB_acorelaxtime_list, self.data['CSB']['AcoRelaxT'][i,:]))
CSB_optrelaxtime_list = np.concatenate((CSB_optrelaxtime_list, self.data['CSB']['OptRelaxT'][i,:]))
CSB_imprelaxtime_list = np.concatenate((CSB_imprelaxtime_list, self.data['CSB']['ImpRelaxT'][i,:]))
TCB_density_list=np.concatenate((TCB_density_list, self.data['TCB']['Density'][i,:]))
TCB_seebeck_list=np.concatenate((TCB_seebeck_list, self.data['TCB']['Seebeck'][i,:]))
TCB_mobility_list = np.concatenate((TCB_mobility_list, self.data['TCB']['Mobility'][i,:]))
TCB_elcond_list=np.concatenate((TCB_elcond_list, self.data['TCB']['Elcond'][i,:]))
TCB_lorenz_list=np.concatenate((TCB_lorenz_list, self.data['TCB']['Lorenz'][i,:]))
TCB_ekappa_list=np.concatenate((TCB_ekappa_list, self.data['TCB']['Ekappa'][i,:]))
TCB_hallcoeff_list=np.concatenate((TCB_hallcoeff_list, self.data['TCB']['Hallcoeff'][i,:]))
TCB_pf_list = np.concatenate((TCB_pf_list, self.data['TCB']['PF'][i,:]))
if hasattr(self, 'VSB'):
VSB_density_list=np.concatenate((VSB_density_list, self.data['VSB']['Density'][i,:]))
VSB_seebeck_list=np.concatenate((VSB_seebeck_list, self.data['VSB']['Seebeck'][i,:]))
VSB_mobility_list = np.concatenate((VSB_mobility_list, self.data['VSB']['Mobility'][i,:]))
VSB_elcond_list=np.concatenate((VSB_elcond_list, self.data['VSB']['Elcond'][i,:]))
VSB_lorenz_list=np.concatenate((VSB_lorenz_list, self.data['VSB']['Lorenz'][i,:]))
VSB_ekappa_list=np.concatenate((VSB_ekappa_list, self.data['VSB']['Ekappa'][i,:]))
VSB_hallcoeff_list=np.concatenate((VSB_hallcoeff_list, self.data['VSB']['Hallcoeff'][i,:]))
VSB_pf_list = np.concatenate((VSB_pf_list, self.data['VSB']['PF'][i,:]))
VSB_totalrelaxtime_list = np.concatenate((VSB_totalrelaxtime_list, self.data['VSB']['TotalRelaxT'][i,:]))
VSB_acorelaxtime_list = np.concatenate((VSB_acorelaxtime_list, self.data['VSB']['AcoRelaxT'][i,:]))
VSB_optrelaxtime_list = np.concatenate((VSB_optrelaxtime_list, self.data['VSB']['OptRelaxT'][i,:]))
VSB_imprelaxtime_list = np.concatenate((VSB_imprelaxtime_list, self.data['VSB']['ImpRelaxT'][i,:]))
TVB_density_list=np.concatenate((TVB_density_list, self.data['TVB']['Density'][i,:]))
TVB_seebeck_list=np.concatenate((TVB_seebeck_list, self.data['TVB']['Seebeck'][i,:]))
TVB_mobility_list = np.concatenate((TVB_mobility_list, self.data['TVB']['Mobility'][i,:]))
TVB_elcond_list=np.concatenate((TVB_elcond_list, self.data['TVB']['Elcond'][i,:]))
TVB_lorenz_list=np.concatenate((TVB_lorenz_list, self.data['TVB']['Lorenz'][i,:]))
TVB_ekappa_list=np.concatenate((TVB_ekappa_list, self.data['TVB']['Ekappa'][i,:]))
TVB_hallcoeff_list=np.concatenate((TVB_hallcoeff_list, self.data['TVB']['Hallcoeff'][i,:]))
TVB_pf_list = np.concatenate((TVB_pf_list, self.data['TVB']['PF'][i,:]))
if mode == 'standard':
CBM_dict={"Temperature": Temp_list, "mu": mu_list, "Concentration": CBM_density_list, "Seebeck": CBM_seebeck_list, \
"Mobility": CBM_mobility_list, "Elcond": CBM_elcond_list, "Lorenz": CBM_lorenz_list, "Ekappa": CBM_ekappa_list, \
"Hallcoeff": CBM_hallcoeff_list, "PF": CBM_pf_list, "TotalRelaxTime": CBM_totalrelaxtime_list, "AcoRelaxTime": CBM_acorelaxtime_list, \
"OptRelaxTime": CBM_optrelaxtime_list, "ImpRelaxTime": CBM_imprelaxtime_list}
VBM_dict={"Temperature": Temp_list, "mu": mu_list, "Concentration": VBM_density_list, "Seebeck": VBM_seebeck_list, \
"Mobility": VBM_mobility_list, "Elcond": VBM_elcond_list, "Lorenz": VBM_lorenz_list, "Ekappa": VBM_ekappa_list, \
"Hallcoeff": VBM_hallcoeff_list, "PF": VBM_pf_list, "TotalRelaxTime": VBM_totalrelaxtime_list, "AcoRelaxTime": VBM_acorelaxtime_list, \
"OptRelaxTime": VBM_optrelaxtime_list, "ImpRelaxTime": VBM_imprelaxtime_list}
if hasattr(self, 'CSB'):
CSB_dict={"Temperature": Temp_list, "mu": mu_list, "Concentration": CSB_density_list, "Seebeck": CSB_seebeck_list, \
"Mobility": CSB_mobility_list, "Elcond": CSB_elcond_list, "Lorenz": CSB_lorenz_list, "Ekappa": CSB_ekappa_list, \
"Hallcoeff": CSB_hallcoeff_list, "PF": CSB_pf_list, "TotalRelaxTime": CSB_totalrelaxtime_list, "AcoRelaxTime": CSB_acorelaxtime_list, \
"OptRelaxTime": CSB_optrelaxtime_list, "ImpRelaxTime": CSB_imprelaxtime_list}
TCB_dict={"Temperature": Temp_list, "mu": mu_list, "Concentration": TCB_density_list, "Seebeck": TCB_seebeck_list, \
"Mobility": TCB_mobility_list, "Elcond": TCB_elcond_list, "Lorenz": TCB_lorenz_list, "Ekappa": TCB_ekappa_list, \
"Hallcoeff": TCB_hallcoeff_list, "PF": TCB_pf_list}
if hasattr(self, 'VSB'):
VSB_dict={"Temperature": Temp_list, "mu": mu_list, "Concentration": VSB_density_list, "Seebeck": VSB_seebeck_list, \
"Mobility": VSB_mobility_list, "Elcond": VSB_elcond_list, "Lorenz": VSB_lorenz_list, "Ekappa": VSB_ekappa_list, \
"Hallcoeff": VSB_hallcoeff_list, "PF": VSB_pf_list, "TotalRelaxTime": VSB_totalrelaxtime_list, "AcoRelaxTime": VSB_acorelaxtime_list, \
"OptRelaxTime": VSB_optrelaxtime_list, "ImpRelaxTime": VSB_imprelaxtime_list}
TVB_dict={"Temperature": Temp_list, "mu": mu_list, "Concentration": TVB_density_list, "Seebeck": TVB_seebeck_list, \
"Mobility": TVB_mobility_list, "Elcond": TVB_elcond_list, "Lorenz": TVB_lorenz_list, "Ekappa": TVB_ekappa_list, \
"Hallcoeff": TVB_hallcoeff_list, "PF": TVB_pf_list}
if mode == 'doping':
CBM_dict={"Temperature": Temp_list, "dope": dope_list, "Concentration": CBM_density_list, "Seebeck": CBM_seebeck_list, \
"Mobility": CBM_mobility_list, "Elcond": CBM_elcond_list, "Lorenz": CBM_lorenz_list, "Ekappa": CBM_ekappa_list, \
"Hallcoeff": CBM_hallcoeff_list, "PF": CBM_pf_list, "TotalRelaxTime": CBM_totalrelaxtime_list, "AcoRelaxTime": CBM_acorelaxtime_list, \
"OptRelaxTime": CBM_optrelaxtime_list, "ImpRelaxTime": CBM_imprelaxtime_list}
VBM_dict={"Temperature": Temp_list, "dope": dope_list, "Concentration": VBM_density_list, "Seebeck": VBM_seebeck_list, \
"Mobility": VBM_mobility_list, "Elcond": VBM_elcond_list, "Lorenz": VBM_lorenz_list, "Ekappa": VBM_ekappa_list, \
"Hallcoeff": VBM_hallcoeff_list, "PF": VBM_pf_list, "TotalRelaxTime": VBM_totalrelaxtime_list, "AcoRelaxTime": VBM_acorelaxtime_list, \
"OptRelaxTime": VBM_optrelaxtime_list, "ImpRelaxTime": VBM_imprelaxtime_list}
if hasattr(self, 'CSB'):
CSB_dict={"Temperature": Temp_list, "dope": dope_list, "Concentration": CSB_density_list, "Seebeck": CSB_seebeck_list, \
"Mobility": CSB_mobility_list, "Elcond": CSB_elcond_list, "Lorenz": CSB_lorenz_list, "Ekappa": CSB_ekappa_list, \
"Hallcoeff": CSB_hallcoeff_list, "PF": CSB_pf_list, "TotalRelaxTime": CSB_totalrelaxtime_list, "AcoRelaxTime": CSB_acorelaxtime_list, \
"OptRelaxTime": CSB_optrelaxtime_list, "ImpRelaxTime": CSB_imprelaxtime_list}
TCB_dict={"Temperature": Temp_list, "dope": dope_list, "Concentration": TCB_density_list, "Seebeck": TCB_seebeck_list, \
"Mobility": TCB_mobility_list, "Elcond": TCB_elcond_list, "Lorenz": TCB_lorenz_list, "Ekappa": TCB_ekappa_list, \
"Hallcoeff": TCB_hallcoeff_list, "PF": TCB_pf_list}
if hasattr(self, 'VSB'):
VSB_dict={"Temperature": Temp_list, "dope": dope_list, "Concentration": VSB_density_list, "Seebeck": VSB_seebeck_list, \
"Mobility": VSB_mobility_list, "Elcond": VSB_elcond_list, "Lorenz": VSB_lorenz_list, "Ekappa": VSB_ekappa_list, \
"Hallcoeff": VSB_hallcoeff_list, "PF": VSB_pf_list, "TotalRelaxTime": VSB_totalrelaxtime_list, "AcoRelaxTime": VSB_acorelaxtime_list, \
"OptRelaxTime": VSB_optrelaxtime_list, "ImpRelaxTime": VSB_imprelaxtime_list}
TVB_dict={"Temperature": Temp_list, "dope": dope_list, "Concentration": TVB_density_list, "Seebeck": TVB_seebeck_list, \
"Mobility": TVB_mobility_list, "Elcond": TVB_elcond_list, "Lorenz": TVB_lorenz_list, "Ekappa": TVB_ekappa_list, \
"Hallcoeff": TVB_hallcoeff_list, "PF": TVB_pf_list}
CBM_FILE = pd.DataFrame(CBM_dict)
VBM_FILE = pd.DataFrame(VBM_dict)
CBM_FILE.to_excel('CBM.xlsx', index_label='index', merge_cells=False)
VBM_FILE.to_excel('VBM.xlsx', index_label='index', merge_cells=False)
fp = open('Parameters', 'w')
fp.write('band: m_parallel m_vertical m_cond m_dos E N Bandgap c epsilon_inf epsilon_0 %s' % os.linesep)
fp.write("CBM: %.4f %.4f %.4f %.4f %.4f %d %.4f %.4f %.4e %.4e %s" % (self.CBM.RT.ACO.EMC.parallelmass, self.CBM.RT.ACO.EMC.verticalmass,
self.CBM.RT.ACO.EMC.condeffmass, self.CBM.RT.ACO.doseffmass, self.CBM.RT.ACO.DPC.value, self.CBM.RT.ACO.N, self.CBM.RT.ACO.Bandgap,
self.CBM.RT.ACO.Elastic.value, self.CBM.RT.OPT.Diel.electron, self.CBM.RT.OPT.Diel.static, os.linesep))
fp.write("VBM: %.4f %.4f %.4f %.4f %.4f %d %.4f %.4f %.4e %.4e %s" % (self.VBM.RT.ACO.EMC.parallelmass, self.VBM.RT.ACO.EMC.verticalmass,
self.VBM.RT.ACO.EMC.condeffmass, self.VBM.RT.ACO.doseffmass, self.VBM.RT.ACO.DPC.value, self.VBM.RT.ACO.N, self.VBM.RT.ACO.Bandgap,
self.VBM.RT.ACO.Elastic.value, self.VBM.RT.OPT.Diel.electron, self.VBM.RT.OPT.Diel.static, os.linesep))
if hasattr(self, 'CSB'):
CSB_FILE = pd.DataFrame(CSB_dict)
TCB_FILE = pd.DataFrame(TCB_dict)
CSB_FILE.to_excel('CSB.xlsx', index_label='index', merge_cells=False)
TCB_FILE.to_excel('TCB.xlsx', index_label='index', merge_cells=False)
fp.write("CSB: %.4f %.4f %.4f %.4f %.4f %d %.4f %.4f %.4e %.4e %s" % (self.CSB.RT.ACO.EMC.parallelmass, self.CSB.RT.ACO.EMC.verticalmass,
self.CSB.RT.ACO.EMC.condeffmass, self.CSB.RT.ACO.doseffmass, self.CSB.RT.ACO.DPC.value, self.CSB.RT.ACO.N, self.CSB.RT.ACO.Bandgap,
self.CSB.RT.ACO.Elastic.value, self.CSB.RT.OPT.Diel.electron, self.CSB.RT.OPT.Diel.static, os.linesep))
if hasattr(self, 'VSB'):
VSB_FILE = pd.DataFrame(VSB_dict)
TVB_FILE = pd.DataFrame(TVB_dict)
VSB_FILE.to_excel('VSB.xlsx', index_label='index', merge_cells=False)
TVB_FILE.to_excel('TVB.xlsx', index_label='index', merge_cells=False)
fp.write("VSB: %.4f %.4f %.4f %.4f %.4f %d %.4f %.4f %.4e %.4e %s" % (self.VSB.RT.ACO.EMC.parallelmass, self.VSB.RT.ACO.EMC.verticalmass,
self.VSB.RT.ACO.EMC.condeffmass, self.VSB.RT.ACO.doseffmass, self.VSB.RT.ACO.DPC.value, self.VSB.RT.ACO.N, self.VSB.RT.ACO.Bandgap,
self.VSB.RT.ACO.Elastic.value, self.VSB.RT.OPT.Diel.electron, self.VSB.RT.OPT.Diel.static, os.linesep))
fp.close()
return | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/electron.py | electron.py |
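# --- Illustrative driver call (hypothetical values) ------------------------------
# Get_Electron() expects the working directory to contain the layout assembled by
# the BuildAICONDir Firetask (equi/, 0.5per/, 1.0per/, dielect/, elastic/, CBM/,
# VBM/ and, when present, CSB/, VSB/). The temperatures and doping levels below
# are example values only.
def _example_get_electron():
    Temp = [300, 400, 500]      # K
    dope = [1e19, 1e20]         # cm^-3, used only in 'doping' mode
    Get_Electron("./", Temp, dope, mode="doping", ifSB=True)
    # Results are written to CBM.xlsx / VBM.xlsx (plus CSB/TCB/VSB/TVB
    # spreadsheets when secondary bands are found) and a Parameters text file.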
import os
import numpy as np
from fireworks import explicit_serialize, FiretaskBase, FWAction
from pymatgen.core import Structure
from pymatgen.io.vasp.inputs import Incar
from pymatgen.io.vasp.outputs import Oszicar,VaspParserError
from pymatgen.io.vasp.sets import MPStaticSet, MPNonSCFSet, MPRelaxSet
from pymatgen.electronic_structure.core import Spin
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from atomate.common.firetasks.glue_tasks import get_calc_loc, CopyFiles
from atomate.utils.utils import env_chk
from aicon.tools import Generate_kpoints, Write_INPCAR, get_highsympath
from aicon.electron import Electron, Get_Electron
from aicon.myemc import EffectMass
from aicon.phonon import Get_Phonon
import phonopy
from phonopy.interface.phonopy_yaml import PhonopyYaml
from phonopy.interface.calculator import get_default_physical_units
from phonopy.phonon.band_structure import get_band_qpoints
from phonopy.interface.vasp import read_vasp, create_FORCE_CONSTANTS, write_supercells_with_displacements
from phonopy import PhonopyGruneisen
@explicit_serialize
class CheckOptimization(FiretaskBase):
"""
    Check whether the structure optimization has fully converged. The criterion is that the number of ionic steps
    in OSZICAR should be 1. If not, copy CONTCAR to POSCAR and optimize again.
Required params:
(none)
Optional params:
(As for MyOptimizeFW:
["vasp_input_set", "vasp_cmd", "db_file", "name", "count", "kwargs"])
"""
optional_params = ["vasp_input_set", "vasp_input_set_params", "vasp_cmd", "db_file", "name", "count", "kwargs"]
_fw_name = "Check Optimization"
def run_task(self, fw_spec):
try:
OSZICAR = Oszicar('OSZICAR')
except VaspParserError as err:
print(err)
else:
if len(OSZICAR.ionic_steps) == 1:
new_name = "{}-{}".format(self.get("name"), "final")
calc_locs = list(fw_spec.get("calc_locs", []))
calc_locs.append({"name": new_name,
"filesystem": env_chk(self.get('filesystem', None), fw_spec),
"path": self.get("path", os.getcwd())})
return FWAction(mod_spec=[{'_push_all': {'calc_locs': calc_locs}}])
else:
stru = Structure.from_file("CONTCAR")
kpoint_set = Generate_kpoints(stru, 0.03)
if self.get("vasp_input_set", None) is not None:
vasp_input_set_temp = self.get("vasp_input_set")
tempdict = dict(vasp_input_set_temp.incar.items())
vasp_input_set = MPRelaxSet(stru, user_incar_settings=tempdict, user_kpoints_settings=kpoint_set)
else:
vasp_input_set_params = self.get("vasp_input_set_params")
vasp_input_set = MPRelaxSet(stru, user_incar_settings=vasp_input_set_params, user_kpoints_settings=kpoint_set)
vasp_cmd = self.get("vasp_cmd")
db_file = self.get("db_file")
name = self.get("name")
count = self.get("count")
kwargs = self.get("kwargs", {})
calc_locs = list(fw_spec.get("calc_locs", []))
calc_locs.append({"name": "{}-{}".format(name, str(count)),
"filesystem": env_chk(self.get('filesystem', None), fw_spec),
"path": self.get("path", os.getcwd())})
count = count + 1
from aicon.myfireworks import MyOptimizeFW
new_fw = MyOptimizeFW(structure=stru, vasp_input_set=vasp_input_set, vasp_cmd=vasp_cmd,
db_file=db_file, name=name, count=count, **kwargs)
return FWAction(mod_spec=[{'_push_all': {'calc_locs': calc_locs}}], detours=new_fw)
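# How the optimization loop above terminates: every pass records its directory in
# fw_spec["calc_locs"]. A converged run (a single ionic step in OSZICAR) is
# registered as "<name>-final", which downstream Fireworks retrieve via
# prev_calc_loc, while an unconverged run is stored as "<name>-<count>" and a
# fresh MyOptimizeFW detour is launched from CONTCAR with count incremented.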
@explicit_serialize
class WriteVaspStaticFromPrev(FiretaskBase):
"""
Writes input files for a static run. Assumes that output files from a previous
(e.g., optimization) run can be accessed in current dir or prev_calc_dir. Also allows
lepsilon (dielectric constant) calcs.
Required params:
(none)
Optional params:
(documentation for all other optional params can be found in MPStaticSet)
"""
optional_params = ["prev_calc_dir", "user_incar_settings", "user_kpoints_settings",
"standardize", "sym_prec", "lepsilon", "supercell", "other_params"]
_fw_name = "Write Static"
def run_task(self, fw_spec):
unitcell = Structure.from_file("POSCAR")
supercell = self.get("supercell", None)
if supercell is not None:
os.system('cp POSCAR POSCAR-unitcell')
unitcell.make_supercell(supercell)
lepsilon = self.get("lepsilon", False)
standardize = self.get("standardize", False)
other_params = self.get("other_params", {})
user_incar_settings = self.get("user_incar_settings", {})
finder = SpacegroupAnalyzer(unitcell)
prims = finder.get_primitive_standard_structure()
# for lepsilon runs, the kpoints should be denser
if lepsilon:
kpoint_set = Generate_kpoints(prims, 0.02)
struct = prims
elif standardize:
kpoint_set = Generate_kpoints(prims, 0.03)
struct = prims
else:
kpoint_set = Generate_kpoints(unitcell, 0.03)
struct = unitcell
vis = MPStaticSet(struct,
user_incar_settings=user_incar_settings,
user_kpoints_settings=kpoint_set,
sym_prec=self.get("sym_prec", 0.1),
lepsilon=lepsilon, **other_params)
vis.write_input(".")
@explicit_serialize
class WriteVaspNSCFFromPrev(FiretaskBase):
"""
Writes input files for an NSCF static run. Assumes that output files from an
scf job can be accessed. There are many options, e.g. uniform mode,
line mode, adding the optical properties, etc.
Required params:
(none)
Optional params:
(documentation for all optional params can be found in NonSCFVaspInputSet)
"""
required_params = []
optional_params = ["prev_calc_dir", "copy_chgcar", "nbands_factor", "reciprocal_density",
"kpoints_line_density", "small_gap_multiply", "standardize", "sym_prec",
"mode", "other_params", "user_incar_settings"]
_fw_name = "Write NSCF"
def run_task(self, fw_spec):
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=self.get("prev_calc_dir", "."),
copy_chgcar=self.get("copy_chgcar", False),
user_incar_settings=self.get("user_incar_settings", {}),
nbands_factor=self.get("nbands_factor", 2),
reciprocal_density=self.get("reciprocal_density", 100),
kpoints_line_density=self.get("kpoints_line_density", 30),
small_gap_multiply=self.get("small_gap_multiply", None),
standardize=self.get("standardize", False),
sym_prec=self.get("sym_prec", 0.1),
mode=self.get("mode", "line"),
**self.get("other_params", {}))
vis.write_input(".")
@explicit_serialize
class WriteEMCInput(FiretaskBase):
"""
    Write the INPCAR file for the effective-mass calculation (emc) and the corresponding KPOINTS file.
Required params:
(["bnd_name", "calc_loc"])
Optional params:
(["step_size"])
"""
required_params = ["bnd_name", "calc_loc"]
optional_params = ["step_size"]
_fw_name = "Write EMCInput"
def run_task(self, fw_spec):
calc_loc = get_calc_loc(self["calc_loc"],fw_spec["calc_locs"]) if self.get("calc_loc") else {}
filepath = calc_loc['path']
C = Electron()
C.Get_bandstru(filepath)
if self["bnd_name"] == "CBM":
coord = C.engband.get_cbm()['kpoint'].frac_coords
bnd_num = np.min(C.engband.get_cbm()['band_index'][Spin.up]) + 1
elif self["bnd_name"] == "VBM":
coord = C.engband.get_vbm()['kpoint'].frac_coords
bnd_num = np.max(C.engband.get_vbm()['band_index'][Spin.up]) + 1
elif self["bnd_name"] == "CSB":
C.Get_SB()
if hasattr(C, 'CSB'):
coord = C.engband.kpoints[C.CSB.pos["kptindex"]].frac_coords
bnd_num = np.min(C.engband.get_cbm()['band_index'][Spin.up]) + 1
else:
return FWAction(exit=True)
elif self["bnd_name"] == "VSB":
C.Get_SB()
if hasattr(C, 'VSB'):
coord = C.engband.kpoints[C.VSB.pos["kptindex"]].frac_coords
bnd_num = np.max(C.engband.get_vbm()['band_index'][Spin.up]) + 1
else:
return FWAction(exit=True)
else:
            raise ValueError("bnd_name must be one of 'CBM', 'VBM', 'CSB', 'VSB'")
lattice = C.engband.structure.lattice.matrix
Write_INPCAR(coord, self["step_size"], bnd_num, "V", lattice)
EMC = EffectMass()
inpcar_fh = open('INPCAR', 'r')
(kpt, stepsize, band, prg, basis) = EMC.parse_inpcar(inpcar_fh)
EMC.get_kpointsfile(kpt, stepsize, prg, basis)
@explicit_serialize
class WriteVaspForDeformedCrystal(FiretaskBase):
"""
Overwrite INCAR and POSCAR for a structure optimization for deformed crystal.
Assumes that output files from a previous (e.g., optimization) run can be accessed in current dir or prev_calc_dir.
Required params:
(["strain", "user_incar_settings"])
Optional params:
(None)
"""
required_params = ["strain", "user_incar_settings"]
_fw_name = "Write Vasp For Deformed Crystal"
def run_task(self, fw_spec):
strain = self.get("strain", 0.0)
user_incar_settings = self.get("user_incar_settings", {})
struct = Structure.from_file("POSCAR")
struct.apply_strain(strain)
INCAR = Incar(user_incar_settings)
struct.to(filename="POSCAR")
INCAR.write_file("INCAR")
@explicit_serialize
class WritePhononBand(FiretaskBase):
"""
Write 2nd FORCE CONSTANT file and band.yaml file.
Required params:
(["supercell"])
Optional params:
(None)
"""
_fw_name = "Write Phonon Band"
required_params = ["supercell"]
def run_task(self, fw_spec):
create_FORCE_CONSTANTS("vasprun.xml", False, 1)
(Keylist,Coordslist,prims,transmat) = get_highsympath("POSCAR-unitcell")
phonon = phonopy.load(supercell_matrix=self.get("supercell"),
primitive_matrix=transmat,
unitcell_filename="POSCAR-unitcell",
calculator="vasp",
is_nac=False,
force_constants_filename="FORCE_CONSTANTS")
points = get_band_qpoints([np.array(Coordslist)], 51)
phonon.run_band_structure(points, with_group_velocities=True)
phonon.write_yaml_band_structure()
@explicit_serialize
class BuildAICONDir(FiretaskBase):
"""
Build the directory for AICON calculation, the name of each subdirectory is specific
and should not be changed.
Required params:
(None)
Optional params:
(None)
"""
_fw_name = "Build AICON Directory"
def run_task(self, fw_spec):
files_to_copy = ["POSCAR", "INCAR", "KPOINTS", "vasprun.xml", "OUTCAR", "POTCAR"]
files_to_copy_add = ["POSCAR", "INCAR", "KPOINTS", "vasprun.xml", "OUTCAR", "INPCAR", "EIGENVAL"]
path_dict = dict()
calc_loc_equi = get_calc_loc("equi nscf",fw_spec["calc_locs"])
path_dict["equi"]=calc_loc_equi["path"]
calc_loc_05 = get_calc_loc("0.5per nscf",fw_spec["calc_locs"])
path_dict["0.5per"]=calc_loc_05["path"]
calc_loc_10 = get_calc_loc("1.0per nscf",fw_spec["calc_locs"])
path_dict["1.0per"]=calc_loc_10["path"]
calc_loc_dielect = get_calc_loc("dielectric",fw_spec["calc_locs"])
path_dict["dielect"]=calc_loc_dielect["path"]
calc_loc_elastic = get_calc_loc("elastic",fw_spec["calc_locs"])
path_dict["elastic"]=calc_loc_elastic["path"]
calc_loc_CBM = get_calc_loc("CBM",fw_spec["calc_locs"])
path_dict["CBM"]=calc_loc_CBM["path"]
calc_loc_VBM = get_calc_loc("VBM",fw_spec["calc_locs"])
path_dict["VBM"]=calc_loc_VBM["path"]
try:
calc_loc_CSB = get_calc_loc("CSB",fw_spec["calc_locs"])
except ValueError as err:
print(err)
else:
path_dict["CSB"]=calc_loc_CSB["path"]
try:
calc_loc_VSB = get_calc_loc("VSB",fw_spec["calc_locs"])
except ValueError as err:
print(err)
else:
path_dict["VSB"]=calc_loc_VSB["path"]
curr_dir = os.getcwd()
for key, path in path_dict.items():
new_dir = os.path.join(curr_dir, key)
os.makedirs(new_dir)
if key not in ["CBM", "VBM", "CSB", "VSB"]:
copy = CopyFiles(from_dir=path, to_dir=new_dir, files_to_copy=files_to_copy)
copy.run_task(fw_spec)
else:
copy = CopyFiles(from_dir=path, to_dir=new_dir, files_to_copy=files_to_copy_add)
copy.run_task(fw_spec)
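# Illustrative note (added comment, not part of the original task): after
# BuildAICONDir.run_task completes, the current working directory is expected to
# contain one subdirectory per source calculation, e.g.
#
#   ./equi  ./0.5per  ./1.0per  ./dielect  ./elastic  ./CBM  ./VBM  [./CSB  ./VSB]
#
# where CSB/VSB only appear if the corresponding calc_locs were found above.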
@explicit_serialize
class BuildPhonopyDir(FiretaskBase):
"""
Build the directory for gruneisen parameters calculation, the name of each subdirectory is specific
and should not be changed.
Required params:
(["supercell"])
Optional params:
(None)
"""
_fw_name = "Build Phonopy Directory"
required_params = ["supercell"]
def run_task(self, fw_spec):
files_to_copy = ["POSCAR-unitcell", "INCAR", "vasprun.xml", "FORCE_CONSTANTS", "band.yaml"]
path_dict = dict()
calc_loc_orig = get_calc_loc("orig phonon band",fw_spec["calc_locs"])
path_dict["orig"]=calc_loc_orig["path"]
calc_loc_minus = get_calc_loc("minus phonon band",fw_spec["calc_locs"])
path_dict["minus"]=calc_loc_minus["path"]
calc_loc_plus = get_calc_loc("plus phonon band",fw_spec["calc_locs"])
path_dict["plus"]=calc_loc_plus["path"]
curr_dir = os.getcwd()
for key, path in path_dict.items():
new_dir = os.path.join(curr_dir, key)
os.makedirs(new_dir)
copy = CopyFiles(from_dir=path, to_dir=new_dir, files_to_copy=files_to_copy)
copy.run_task(fw_spec)
phonons = {}
for vol in ("orig", "plus", "minus"):
(Keylist,Coordslist,prims,transmat) = get_highsympath("%s/POSCAR-unitcell" % vol)
phonon = phonopy.load(supercell_matrix=self.get("supercell"),
primitive_matrix=transmat,
unitcell_filename="%s/POSCAR-unitcell" % vol,
calculator="vasp",
is_nac=False,
force_constants_filename="%s/FORCE_CONSTANTS" % vol)
phonons[vol] = phonon
gruneisen = PhonopyGruneisen(phonons["orig"],
phonons["plus"],
phonons["minus"])
bands=get_band_qpoints([np.array(Coordslist)], 51)
gruneisen.set_band_structure(bands)
gruneisen.write_yaml_band_structure()
@explicit_serialize
class RunAICONForElec(FiretaskBase):
"""
Run AICON to calculate electrical conductivity.
Required params:
(["mode", "Temp", "Doping", "ifSB"])
"""
required_params = ["mode", "Temp", "Doping", "ifSB"]
_fw_name = "Run AICON For Elec"
def run_task(self, fw_spec):
mode = self.get("mode", "standard")
Temp = self.get("Temp", [300])
Doping = self.get("Doping", [1e19])
ifSB = self.get("ifSB", True)
Get_Electron("./", Temp, Doping, mode, ifSB)
@explicit_serialize
class RunAICONForPhon(FiretaskBase):
"""
Run AICON to calculate lattice thermal conductivity.
Required params:
(["Temp", "ifscale"])
"""
required_params = ["Temp", "ifscale"]
_fw_name = "Run AICON For Phon"
def run_task(self, fw_spec):
Temp = self.get("Temp", [300])
os.system('cp orig/band.yaml ./')
os.system('cp orig/POSCAR-unitcell ./POSCAR')
ifscale = self.get("ifscale", False)
Get_Phonon("./", Temp, ifscale)
@explicit_serialize
class WriteSupercellWithDisp(FiretaskBase):
"""
Write Supercells with displacement, combine with finite difference method.
Required params:
(["supercell"])
"""
_fw_name = "Write Supercell With Displacement"
required_params = ["supercell"]
def run_task(self, fw_spec):
unitcell = read_vasp("POSCAR-unitcell")
phonon = phonopy.Phonopy(unitcell, self.get("supercell"))
supercell = phonon.get_supercell()
phonon.generate_displacements()
supercells = phonon.supercells_with_displacements
ids = np.arange(len(supercells)) + 1
write_supercells_with_displacements(supercell, supercells, ids)
units = get_default_physical_units("vasp")
phpy_yaml = PhonopyYaml(physical_units=units,
settings={'force_sets': False,
'born_effective_charge': False,
'dielectric_constant': False,
'displacements': True})
phpy_yaml.set_phonon_info(phonon)
with open("phonopy_disp.yaml", 'w') as w:
w.write(str(phpy_yaml)) | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/myfiretasks.py | myfiretasks.py |
import scipy.constants
from scipy.integrate import quad
c = scipy.constants.c
C_e = scipy.constants.e
def Upperlimit(x):
if x < 0:
return 100
else:
return x + 100
class Hallcoeff(object):
'''Hall coefficient class'''
def __init__(self, flag, RelaxTime):
self.value = 0.0
self.flag = flag
self.RelaxT = RelaxTime
def Get_AK(self):
''' Calculate effective mass anisotropy factor. '''
K = self.RelaxT.ACO.EMC.parallelmass / self.RelaxT.ACO.EMC.verticalmass
self.A_K = 3 * K * (K + 2) / (2 * K + 1)**2
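    # Quick numerical check of the anisotropy factor (added comment, based only on the
    # formula above): an isotropic band with K = 1 gives A_K = 3*1*3/9 = 1, while
    # K = 2 gives A_K = 3*2*4/25 = 0.96.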
def Get_A_NP(self):
''' Calculate Hall factor with non-parabolic approximation.'''
fun1 = lambda z, x, T: (1e14 * self.Totaltime(z, T))**2 * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3 / (1 + 2 * self.RelaxT.Beta(T) * z)**2
self.integfun1 = lambda x, T: quad(fun1, 0, Upperlimit(x), args=(x, T))[0]
def Get_A_P(self):
''' Calculate Hall factor with parabolic approximation. '''
fun1 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3
fun2 = lambda z, x, T: (1e14 * self.Totaltime(z, T))**2 * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3
fun3 = lambda z, x, T: (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3
self.A = lambda x, T: self.A_K * quad(fun2, 0, Upperlimit(x), args=(x, T))[0] * quad(fun3, 0, Upperlimit(x), args=(x, T))[0] / (quad(fun1, 0, Upperlimit(x), args=(x, T))[0])**2
def Get_hallcoeff(self, ACO = True, ACO_P = False, OPT = False, OPT_P = False, IMP = False, IMP_P = False):
'''
Calculate Hall coefficient.
Parameters:
----------
        ACO: bool
            Whether to include acoustic phonon scattering in the total relaxation time.
        ACO_P: bool
            Whether to include acoustic phonon scattering with the parabolic approximation.
        OPT, OPT_P: bool
            The same switches for optical phonon scattering.
        IMP, IMP_P: bool
            The same switches for ionized impurity scattering.
        '''
fun1 = lambda z, T: 0
fun2 = lambda z, T: 0
fun3 = lambda z, T: 0
if ACO == True or ACO_P == True:
fun1 = lambda z, T: 1/self.RelaxT.ACO.Acotime(z, T)
if OPT == True or OPT_P == True:
if self.RelaxT.OPT.Diel.ion == 0:
pass
else:
fun2 = lambda z, T: 1/self.RelaxT.OPT.Opttime(z, T)
if IMP == True or IMP_P == True:
fun3 = lambda z, T: 1/self.RelaxT.IMP.Imptime(z, T)
self.Totaltime = lambda z, T: 1 / (fun1(z, T) + fun2(z, T) + fun3(z, T))
self.Get_AK()
self.Get_A_NP()
# self.hallcoeff = lambda x, T: self.A(x, T) / (C_e * density(x, T)) | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/hallcoeff.py | hallcoeff.py |
import numpy as np
import scipy.constants
from pymatgen.io.vasp import BSVasprun
from aicon.relaxtime import AcoRelaxTime, AcoRelaxTime_Para, ImpurityRelaxTime, TotalRelaxTime
from aicon.seebeck import Seebeck
from aicon.ekappa import Ekappa
from aicon.hallcoeff import Hallcoeff
m_e = scipy.constants.m_e
C_e = scipy.constants.e
Boltzm = scipy.constants.Boltzmann
EPlanck = scipy.constants.physical_constants['Planck constant over 2 pi in eV s'][0]
EBoltzm = scipy.constants.physical_constants['Boltzmann constant in eV/K'][0]
EtoJoul = scipy.constants.physical_constants['joule-electron volt relationship'][0]
class Band(object):
'''This class represents one band edge considered in transport property calculation'''
def __init__(self, gap, degeneracy, isCBM = False, isVBM = False, isCSB = False, isVSB = False, **pos):
'''
Parameters:
----------
gap: float
band gap, the unit is eV.
degeneracy: int
the band multiplicity due to the symmetry.
\*\*pos: dict
The position of band edge, including kpoint index and band number index.
'''
self.value = 0.0
if isCBM is True:
self.flag = "CBM"
elif isVBM is True:
self.flag = "VBM"
elif isCSB is True:
self.flag = "CSB"
elif isVSB is True:
self.flag = "VSB"
        else:
            self.flag = None
            import warnings
            warnings.warn("band edge type not specified: one of isCBM, isVBM, isCSB or isVSB should be True")
self.bandgap = gap
self.degeneracy = degeneracy
self.pos = pos
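    # Hedged usage sketch (added comment, illustrative only; the gap, degeneracy and
    # kpoint/band indices below are placeholders, not values from a real calculation):
    #
    #     cbm = Band(gap=0.3, degeneracy=4, isCBM=True, bndindex=10, kptindex=25)
    #     cbm.Get_relaxtime("path/to/vasp/outputs")
    #     cbm.Get_seebeck("path/to/vasp/outputs")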
def __get__(self, obj, typ = None):
return self.value
def __str__(self):
return '%.2f' % self.value
__repr__ = __str__
def Get_gap_degeneracy(self, filepath):
vaspband = BSVasprun(filepath + "/vasprun.xml")
bandstru = vaspband.get_band_structure(kpoints_filename=filepath+"/KPOINTS",line_mode=True)
self.bandgap = bandstru.get_band_gap()['energy']
self.degeneracy = bandstru.get_kpoint_degeneracy(bandstru.kpoints[self.pos['kptindex']].frac_coords)
def Get_relaxtime(self, filepath):
        # the same relaxation-time model is used for primary (CBM/VBM) and secondary (CSB/VSB) band edges
        self.RT = TotalRelaxTime(self.flag, self.degeneracy, self.bandgap, self.pos['bndindex'], self.pos['kptindex'])
        self.RT.Get_Totaltime(filepath, ACO = True, OPT = True, IMP = True) #unit: second
def Get_mobility(self, filepath):
if not hasattr(self, 'RT'):
self.Get_relaxtime(filepath)
self.Mobility = lambda x, T: C_e * self.RT.Totaltime(x, T) / (self.RT.Avgeffmass(x,T) * m_e) #unit: m^2 * V^-1 * S^-1
def Get_carridensity(self, filepath):
if not hasattr(self, 'RT'):
self.Get_relaxtime(filepath)
        # identical expression for all band-edge types
        self.Density = lambda x, T: EtoJoul**(3/2) * (2 * np.abs(self.RT.doseffmass) * m_e * EBoltzm * T)**(3/2) / (3 * np.pi**2 * EPlanck**3) \
            * self.RT.integral(x, T, 0, 3/2, 0) #unit: m^-3
def Get_eleconduct(self, filepath):
if not hasattr(self, 'Mobility'):
self.Get_mobility(filepath)
if not hasattr(self, 'Density'):
self.Get_carridensity(filepath)
self.Elcond = lambda x, T: C_e * self.Density(x, T) * self.Mobility(x, T) #unit: 1/m*ohm
def Get_seebeck(self, filepath):
if not hasattr(self, 'RT'):
self.Get_relaxtime(filepath)
self.Seebeck = Seebeck(self.flag, self.RT)
self.Seebeck.Get_seebeck(ACO = True, OPT = True, IMP = False) #unit: V/K
def Get_ekappa(self, filepath):
if not hasattr(self, 'RT'):
self.Get_relaxtime(filepath)
self.Ekappa = Ekappa(self.flag, self.RT)
self.Ekappa.Get_ekappa(ACO = True, OPT = True, IMP = False) #unit: W/m*K
def Get_hallcoeff(self, filepath):
if not hasattr(self, 'RT'):
self.Get_relaxtime(filepath)
self.Hallcoeff = Hallcoeff(self.flag, self.RT)
self.Hallcoeff.Get_hallcoeff(ACO = True, OPT = True, IMP = False) #unit: m^3/C
def Get_transport_para(self, filepath, mu, Temp):
'''Calculate electronic transport parameters and return the values. '''
AcoRelaxT = np.zeros(np.shape(mu))
OptRelaxT = np.zeros(np.shape(mu))
ImpRelaxT = np.zeros(np.shape(mu))
Moment = np.zeros(np.shape(mu))
TotalRelaxT = np.zeros(np.shape(mu))
Avgeffmass = np.zeros(np.shape(mu))
Density = np.zeros(np.shape(mu))
Mobility = np.zeros(np.shape(mu))
Elcond = np.zeros(np.shape(mu))
TemporIntegration = np.zeros(np.shape(mu))
SEEBECK = np.zeros(np.shape(mu))
Lorenz = np.zeros(np.shape(mu))
EKAPPA = np.zeros(np.shape(mu))
Hallfactor = np.zeros(np.shape(mu))
HALLCOEFF = np.zeros(np.shape(mu))
PF = np.zeros(np.shape(mu))
if not hasattr(self, 'RT'):
self.RT = TotalRelaxTime(self.flag, self.degeneracy, self.bandgap, self.pos['bndindex'], self.pos['kptindex'])
self.RT.Get_Totaltime(filepath, ACO = True, OPT = True, IMP = True)
if not hasattr(self, 'Density'):
self.Density = lambda x, T: EtoJoul**(3/2) * (2 * np.abs(self.RT.doseffmass) * m_e * EBoltzm * T)**(3/2) / (3 * np.pi**2 * EPlanck**3) \
* self.RT.integral(x, T, 0, 3/2, 0)
self.Seebeck = Seebeck(self.flag, self.RT)
self.Seebeck.Get_seebeck(ACO = True, OPT = True, IMP = False)
self.Ekappa = Ekappa(self.flag, self.RT)
self.Ekappa.Get_ekappa(ACO = True, OPT = True, IMP = False)
self.Hallcoeff = Hallcoeff(self.flag, self.RT)
self.Hallcoeff.Get_hallcoeff(ACO = True, OPT = True, IMP = False)
for i, T in enumerate(Temp):
for j, x in enumerate(mu[i]):
(Moment[i,j],AcoRelaxT[i,j],Avgeffmass[i,j]) = self.RT.ACO.Get_values(x, T)
OptRelaxT[i,j] = self.RT.OPT.Get_values(x, T, Moment[i,j])
Density[i,j] = self.Density(x, T)
ImpRelaxT[i,j] = self.RT.IMP.Get_values(x, T, Moment[i,j], Density[i,j])
TotalRelaxT[i,j] = 1.0/(1.0/AcoRelaxT[i,j] + 1.0/OptRelaxT[i,j] + 1.0/ImpRelaxT[i,j])
Mobility[i,j] = C_e * TotalRelaxT[i,j] / (Avgeffmass[i,j] * m_e)
Elcond[i,j] = C_e * Density[i,j] * Mobility[i,j]
TemporIntegration[i,j] = self.Seebeck.integfun2(x, T)
SEEBECK[i,j] = Boltzm / C_e * self.Seebeck.integfun1(x, T) / TemporIntegration[i,j]
Lorenz[i,j] = (Boltzm / C_e)**2 * (self.Ekappa.integfun1(x, T) / TemporIntegration[i,j] - (self.Ekappa.integfun2(x, T) / TemporIntegration[i,j])**2)
EKAPPA[i,j] = Lorenz[i,j] * Elcond[i,j] * T
Hallfactor[i,j] = self.Hallcoeff.A_K * self.Hallcoeff.integfun1(x, T) * Moment[i,j] / TemporIntegration[i,j]**2
HALLCOEFF[i,j] = Hallfactor[i,j] / (C_e * Density[i,j])
PF[i,j] = SEEBECK[i,j]**2 * Elcond[i,j]
return AcoRelaxT, OptRelaxT, ImpRelaxT, TotalRelaxT, Density, Mobility,\
Elcond, SEEBECK, Lorenz, EKAPPA, Hallfactor, HALLCOEFF, PF | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/band.py | band.py |
import scipy.constants
from scipy.integrate import quad
Boltzm = scipy.constants.Boltzmann
C_e = scipy.constants.e
def Upperlimit(x):
if x < 0:
return 100
else:
return x + 100
class Seebeck(object):
'''Seebeck coefficient class'''
def __init__(self, flag, RelaxTime):
self.value = 0.0
self.flag = flag
self.RelaxT = RelaxTime
def Get_seebeck_NP(self):
''' Calculate Seebeck coefficient with non-parabolic approximation. '''
fun1 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * (z - x) * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3 / (1 + 2 * self.RelaxT.Beta(T) * z)
fun2 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3 / (1 + 2 * self.RelaxT.Beta(T) * z)
self.integfun1 = lambda x, T: quad(fun1, 0, Upperlimit(x), args=(x, T))[0]
self.integfun2 = lambda x, T: quad(fun2, 0, Upperlimit(x), args=(x, T))[0]
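    # Note (added comment): integfun1/integfun2 are only the transport integrals; the
    # Seebeck coefficient itself is assembled by the caller (see Band.Get_transport_para)
    # as S = (Boltzm / C_e) * integfun1(x, T) / integfun2(x, T).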
def Get_seebeck_P(self):
''' Calculate Seebeck coefficient with parabolic approximation. '''
fun1 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * (z - x) * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3
fun2 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3
self.seebeck = lambda x, T: Boltzm / C_e * quad(fun1, 0, Upperlimit(x), args=(x, T))[0] / quad(fun2, 0, Upperlimit(x), args=(x, T))[0]
def Get_seebeck(self, ACO = True, ACO_P = False, OPT = False, OPT_P = False, IMP = False, IMP_P = False):
'''
Calculate Seebeck coefficient.
Parameters:
----------
        ACO: bool
            Whether to include acoustic phonon scattering in the total relaxation time.
        ACO_P: bool
            Whether to include acoustic phonon scattering with the parabolic approximation.
        OPT, OPT_P: bool
            The same switches for optical phonon scattering.
        IMP, IMP_P: bool
            The same switches for ionized impurity scattering.
        '''
fun1 = lambda z, T: 0
fun2 = lambda z, T: 0
fun3 = lambda z, T: 0
if ACO == True or ACO_P == True:
fun1 = lambda z, T: 1/self.RelaxT.ACO.Acotime(z, T)
if OPT == True or OPT_P == True:
if self.RelaxT.OPT.Diel.ion == 0:
pass
else:
fun2 = lambda z, T: 1/self.RelaxT.OPT.Opttime(z, T)
if IMP == True or IMP_P == True:
fun3 = lambda z, T: 1/self.RelaxT.IMP.Imptime(z, T)
self.Totaltime = lambda z, T: 1 / (fun1(z, T) + fun2(z, T) + fun3(z, T))
self.Get_seebeck_NP() | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/seebeck.py | seebeck.py |
import scipy.constants
from scipy.integrate import quad
Boltzm = scipy.constants.Boltzmann
C_e = scipy.constants.e
def Upperlimit(x):
if x < 0:
return 100
else:
return x + 100
class Ekappa(object):
'''Electronic thermal conductivity class'''
def __init__(self, flag, RelaxTime):
self.value = 0.0
self.flag = flag
self.RelaxT = RelaxTime
def Get_lorenz_NP(self):
'''
Calculate Lorenz number with non-parabolic approximation.
'''
fun1 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * z**2 * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3 / (1 + 2 * self.RelaxT.Beta(T) * z)
fun2 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * z * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3 / (1 + 2 * self.RelaxT.Beta(T) * z)
self.integfun1 = lambda x, T: quad(fun1, 0, Upperlimit(x), args=(x, T))[0]
self.integfun2 = lambda x, T: quad(fun2, 0, Upperlimit(x), args=(x, T))[0]
def Get_lorenz_P(self):
'''
Calculate Lorenz number with parabolic approximation.
'''
fun1 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * z**2 * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3
fun2 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3
fun3 = lambda z, x, T: 1e14 * self.Totaltime(z, T) * z * (-self.RelaxT.DfermidistrFun(z,x)) * self.RelaxT.ACO.Moment(z, T)**3
self.lorenz = lambda x, T: (Boltzm / C_e)**2 * (quad(fun1, 0, Upperlimit(x), args=(x, T))[0] / quad(fun2, 0, Upperlimit(x), args=(x, T))[0] - \
(quad(fun3, 0, Upperlimit(x), args=(x, T))[0] / quad(fun2, 0, Upperlimit(x), args=(x, T))[0])**2)
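    # Note (added comment): writing <g> for the integral of g * (-df/dz) * tau * Moment^3,
    # the parabolic expression above is the usual Lorenz number
    #     L = (k_B/e)^2 * ( <z^2>/<1> - (<z>/<1>)^2 ),
    # and Get_lorenz_NP returns the two non-parabolic pieces that the caller combines
    # in the same way (see Band.Get_transport_para).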
def Get_ekappa(self, ACO = True, ACO_P = False, OPT = False, OPT_P = False, IMP = False, IMP_P = False):
'''
Calculate electronic thermal conductivity.
Parameters:
----------
        ACO: bool
            Whether to include acoustic phonon scattering in the total relaxation time.
        ACO_P: bool
            Whether to include acoustic phonon scattering with the parabolic approximation.
        OPT, OPT_P: bool
            The same switches for optical phonon scattering.
        IMP, IMP_P: bool
            The same switches for ionized impurity scattering.
        '''
fun1 = lambda z, T: 0
fun2 = lambda z, T: 0
fun3 = lambda z, T: 0
if ACO == True or ACO_P == True:
fun1 = lambda z, T: 1/self.RelaxT.ACO.Acotime(z, T)
if OPT == True or OPT_P == True:
if self.RelaxT.OPT.Diel.ion == 0:
pass
else:
fun2 = lambda z, T: 1/self.RelaxT.OPT.Opttime(z, T)
if IMP == True or IMP_P == True:
fun3 = lambda z, T: 1/self.RelaxT.IMP.Imptime(z, T)
self.Totaltime = lambda z, T: 1 / (fun1(z, T) + fun2(z, T) + fun3(z, T))
self.Get_lorenz_NP()
# self.ekappa = lambda x, T: self.lorenz(x, T) * elcond(x, T) * T | AICON | /AICON-2.0.1-py3-none-any.whl/aicon/ekappa.py | ekappa.py |
import json
import os,sys,shutil
from PIL import Image
#from aicloud.voc_xml_generator import xml_fill
import yaml
import codecs
def find_image_size(filename):
with Image.open(filename) as img:
img_width = img.size[0]
img_height = img.size[1]
img_mode = img.mode
if img_mode == "RGB":
img_depth = 3
elif img_mode == "RGBA":
img_depth = 3
elif img_mode == "L":
img_depth = 1
else:
print("img_mode = %s is neither RGB or L" % img_mode)
eixt(0)
return img_width, img_height, img_depth
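# Hedged example (added comment) of the YAML file json2voc() expects; the key names are
# taken from the code below, the paths are placeholders:
#
#   OUTPUT_DIR: /data/output/
#   INPUT_LABELS_DIR: /data/labels/
#   INPUT_IMAGES_DIR: /data/images/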
def json2voc(yamlPath):
    with open(yamlPath, 'r', encoding='utf-8') as f:
        cfg = f.read()
    cfgFile = yaml.safe_load(cfg)
    # create the required VOC directory layout
    tt100k_parent_dir = cfgFile['OUTPUT_DIR']
    work_space_dir = os.path.join(tt100k_parent_dir, "VOCdevkit/")
    if not os.path.isdir(work_space_dir):
        os.mkdir(work_space_dir)
    work_space_dir = os.path.join(work_space_dir, "VOC2007/")
    if not os.path.isdir(work_space_dir):
        os.mkdir(work_space_dir)
    jpeg_images_path = os.path.join(work_space_dir, 'JPEGImages')
    annotations_path = os.path.join(work_space_dir, 'Annotations')
    if not os.path.isdir(jpeg_images_path):
        os.mkdir(jpeg_images_path)
    if not os.path.isdir(annotations_path):
        os.mkdir(annotations_path)
    print('*************************** dataset conversion started ************************************')
input_labels_dir = cfgFile['INPUT_LABELS_DIR']
input_images_dir = cfgFile['INPUT_IMAGES_DIR']
for root,dirs, files in os.walk(input_labels_dir):
for f in files:
            print('converting file', f)
            json_name = f.split(".")[0]
            # read the annotation info and write it to xml
            with open(input_labels_dir+json_name +'.json', 'r',encoding='utf-8') as f:
                object_json = json.load(f)
            # open the corresponding image file
filename = input_images_dir + json_name + ".jpg"
width,height,channels = find_image_size(filename)
with codecs.open(annotations_path + '/' + json_name + ".xml", "w", "utf-8") as xml:
xml.write('<annotation>\n')
xml.write('\t<folder>' + 'VOC2007' + '</folder>\n')
xml.write('\t<filename>' + json_name + ".jpg" + '</filename>\n')
xml.write('\t<source>\n')
xml.write('\t\t<database>The UAV autolanding</database>\n')
xml.write('\t\t<annotation>UAV AutoLanding</annotation>\n')
xml.write('\t\t<image>flickr</image>\n')
xml.write('\t\t<flickrid>NULL</flickrid>\n')
xml.write('\t</source>\n')
xml.write('\t<owner>\n')
xml.write('\t\t<flickrid>NULL</flickrid>\n')
xml.write('\t\t<name>NingWang</name>\n')
xml.write('\t</owner>\n')
xml.write('\t<size>\n')
xml.write('\t\t<width>'+ str(width) + '</width>\n')
xml.write('\t\t<height>'+ str(height) + '</height>\n')
xml.write('\t\t<depth>' + str(channels) + '</depth>\n')
xml.write('\t</size>\n')
xml.write('\t\t<segmented>0</segmented>\n')
num_obj=len(object_json['objects'])
for i in range(num_obj):
obj = object_json['objects'][i]
label = str(obj['f_code'])
for i in range(len(obj['obj_points'])):
xmin=int(obj['obj_points'][i]['x'])
ymin=int(obj['obj_points'][i]['y'])
xmax= xmin + int(obj['obj_points'][i]['w'])
ymax= ymin + int(obj['obj_points'][i]['h'])
if xmax <= xmin:
pass
elif ymax <= ymin:
pass
else:
xml.write('\t<object>\n')
xml.write('\t\t<name>'+ str(label) + '</name>\n')
xml.write('\t\t<pose>Unspecified</pose>\n')
xml.write('\t\t<truncated>1</truncated>\n')
                            xml.write('\t\t<difficult>0</difficult>\n')
xml.write('\t\t<bndbox>\n')
xml.write('\t\t\t<xmin>' + str(xmin) + '</xmin>\n')
xml.write('\t\t\t<ymin>' + str(ymin) + '</ymin>\n')
xml.write('\t\t\t<xmax>' + str(xmax) + '</xmax>\n')
xml.write('\t\t\t<ymax>' + str(ymax) + '</ymax>\n')
xml.write('\t\t</bndbox>\n')
xml.write('\t</object>\n')
#print(json_name,xmin,ymin,xmax,ymax,label)
xml.write('</annotation>')
return | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/json_to_voc.py | json_to_voc.py |
import os
import base64
import time
import sys
import traceback
import logging
import json
import requests
from aicloud.utils import RedisClient
import io
import importlib
import aicloud.settings as settings
# root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
root = os.path.abspath(os.path.dirname(os.getcwd()))
sdk_root = os.path.join(os.getcwd(), 'sdk')
sys.path.append(root)
sys.path.append(sdk_root)
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S")
def __insert_redis(param_path, src_redis):
images_path = os.listdir(param_path)
images_dict = {}
count = 0
# 10 images are default
for image_path in images_path:
try:
if count < 10:
absolute_image_path = os.path.join(param_path, image_path)
logging.info(
"__insert_redis image path:{0} count:{1} ".format(absolute_image_path, count))
with open(absolute_image_path, 'rb') as f:
images_dict[image_path] = f.read()
count += 1
except Exception:
logging.error(traceback.format_exc())
now_time_key = time.strftime("%Y%m%d_%H%M%S", time.localtime())
src_batch_id = now_time_key + "_debug_image"
src_redis.hmset(src_batch_id, images_dict)
return src_batch_id
def simple_inference(infer_rlt_redis_dict=settings.DEBUG_INFER_RLT_REDIS,
is_debug=True, module_path=None, images_path=None):
logging.info(
'simple_inference input0 infer_rlt_redis_dict: {0} is_debug: {1} module_path: {2} images_path: {3}'.format(
infer_rlt_redis_dict, is_debug, module_path, images_path))
# defaults
debug_src_redis_dict = settings.DEBUG_SRC_REDIS_DICT
engine_class_name = settings.ENGINE_CLASS_NAME
load_model_method_name = settings.LOAD_MODEL_METHOD_NAME
inference_method_name = settings.INFERENCE_METHOD_NAME
release_method_name = settings.RELEASE_GPU_RES_METHOD_NAME
if is_debug:
if images_path:
# debug algorithm in notebook
src_redis = RedisClient(debug_src_redis_dict["redis_host"], debug_src_redis_dict["password"],
debug_src_redis_dict["db"])
src_batch_id = __insert_redis(images_path, src_redis)
result_key = src_batch_id + '_result_key'
progress_key = src_batch_id + '_progress_key'
result_redis = RedisClient(infer_rlt_redis_dict["redis_host"], infer_rlt_redis_dict["password"],
infer_rlt_redis_dict["db"])
else:
logging.error('there is no images_path in debug mode')
return
else:
src_batch_id = settings.SRC_BATCH_ID
result_key = settings.RESULT_KEY
progress_key = settings.PROGRESS_KEY
src_redis = RedisClient(settings.SRC_REDIS_HOSTS, settings.SRC_REDIS_PASSWORD, settings.SRC_REDIS_DB)
result_redis = RedisClient(settings.DST_REDIS_HOSTS, settings.DST_REDIS_PASSWORD, settings.DST_REDIS_DB)
if not module_path:
logging.error("no module_path")
return
else:
alg_module = importlib.import_module(module_path)
infer_engine = getattr(alg_module, engine_class_name)()
logging.info('__init__ method executed')
load_model_func = getattr(infer_engine, load_model_method_name)
load_model_func()
logging.info('load_model method executed')
count = 0
if is_debug or settings.INFERENCE_TYPE == '1':
# recoder progress count
key = src_redis.hkeys(src_batch_id)
if len(key) == 0:
logging.error('no source data found')
now_time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
for i in range(len(key)):
logging.info('batch inference start count is:' + str(count))
# read image from redis and encode
data = src_redis.hget(src_batch_id, key[i])
base64_data = base64.b64encode(data).decode()
result_json = __one_pic_inference__(infer_engine, inference_method_name, base64_data)
# store inference result and progress in redis
# we will not process exception about the redis in order to restart automatically
count = count + 1
result_redis.hset(result_key, str(key[i], encoding="utf-8"), json.dumps(result_json))
result_redis.set(progress_key, count)
if is_debug:
# write debug result
result_file_full_name = os.getcwd() + "/" + now_time_str + "_debug_result"
with open(result_file_full_name, 'a+') as f:
f.write(json.dumps(result_json) + "\n\n")
logging.info('your debug result has been written to' + result_file_full_name)
elif settings.INFERENCE_TYPE == '0':
# single pic inference
logging.info('batch inference single pic start ')
base64_data = src_redis.get(src_batch_id)
result_json = __one_pic_inference__(infer_engine, inference_method_name, base64_data)
result_redis.set(result_key, json.dumps(result_json))
count = count + 1
# post process
if is_debug:
src_redis.delete(src_batch_id)
result_redis.expire(result_key, 3600)
else:
# release GPU quotas
try:
release_func = getattr(infer_engine, release_method_name)
release_func()
tmpUrl = os.getenv("releaseQuotaUrl")
url = tmpUrl + str(count)
logging.info('releaseQuotaUrl url:{}'.format(url))
r = requests.get(url)
            msg = 'release GPU quota successfully' if r.status_code == 200 else 'release GPU quota failed'
logging.info(msg)
except:
logging.error(traceback.format_exc())
def __one_pic_inference__(infer_engine, inference_method_name, base64_data):
result_json = {}
try:
t1 = time.time()
infer_func = getattr(infer_engine, inference_method_name)
result = infer_func(base64_data)
t2 = time.time()
infer_time = t2 - t1
result_json = json.loads(result)
# add inference time
result_json['infer_time'] = infer_time
except:
logging.error(traceback.format_exc())
result_json['msg'] = traceback.format_exc()
return result_json
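# Hedged sketch (added comment, not shipped with this module) of the user-supplied
# inference module that simple_inference() loads: the module at ENGINE_MODULE_PATH is
# expected to expose a class named ENGINE_CLASS_NAME ('Engine') with the three methods
# referenced above. The body below is a placeholder, not a real model:
#
#     class Engine(object):
#         def load_model(self):
#             self.model = ...                      # load weights once
#         def inference(self, base64_image):
#             return json.dumps({"objects": []})    # must return a JSON string
#         def release(self):
#             pass                                  # free GPU resources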
if __name__ == '__main__':
simple_inference(is_debug=False, module_path=settings.ENGINE_MODULE_PATH) | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/batch_detection_service.py | batch_detection_service.py |
from aicloud.rest import get, post, put
import json
import os
class AIPlatform(object):
model_upload_path = '/model/file'
model_download_path = '/model/download/modelfile'
create_training_task = '/training/training/form'
create_training_duration = '/training/finish/form/'
def __init__(self, base_url, version='v1', authorization=None, auth_token=None):
self.authorization = authorization
self.auth_token = auth_token
self.url = base_url.rstrip('/') + '/ai/api/' + version
self.train_id_file = '/workspace/trainid.txt'
def _make_token_headers(self, content_type=None):
headers = {
'Authorization': self.authorization,
'auth-token': self.auth_token
}
if content_type:
headers['Content-Type'] = content_type
return headers
def upload_model_file(self, user_id, train_id, nbid, model_file):
"""
upload model file to cloud storage
:param user_id: user id
:param train_id: training id
:param model_file: model file by training
:return:
"""
if os.path.exists(self.train_id_file):
with open(self.train_id_file) as f:
train_id = f.readline()
url = self.url + self.model_upload_path
file = {'multipartFile': model_file}
data = {
'trainingId': train_id,
'userId': user_id,
'nbid': nbid
}
return post(url, headers=self._make_token_headers(), data=data, files=file)
def download_model_file(self, train_id):
"""
download model file from cloud storage
:param train_id: training id
:return:
"""
url = self.url + self.model_download_path
params = {'trainingId': train_id}
return get(url, headers=self._make_token_headers(), params=params)
def save_model_info(self, training_name, log_path, nbid):
"""
        :param training_name: name of the training task
        :param log_path: path of the training log file
        :param nbid: notebook id
:return:
"""
url = self.url + self.create_training_task
data = {
'trainingName': training_name,
'notebookId': nbid,
'logAddr': log_path
}
return post(url, headers=self._make_token_headers(content_type='application/json'), data=json.dumps(data))
def training_duration_info(self):
if os.path.exists(self.train_id_file):
with open(self.train_id_file) as f:
train_id = f.readline()
url = self.url + self.create_training_duration + str(train_id)
return put(url, headers=self._make_token_headers())
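# Hedged usage sketch (added comment, illustrative only; URL and tokens are placeholders):
#
#     api = AIPlatform(base_url="http://ai.example.com", authorization=token, auth_token=auth)
#     code, headers, resp = api.download_model_file(train_id="42")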
class StorageEngine(object):
pass | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/cloudapi.py | cloudapi.py |
import json
import os,sys,shutil
from PIL import Image
from aicloud.voc_xml_generator import xml_fill
import yaml
def find_image_size(filename):
with Image.open(filename) as img:
img_width = img.size[0]
img_height = img.size[1]
img_mode = img.mode
if img_mode == "RGB":
img_depth = 3
elif img_mode == "RGBA":
img_depth = 3
elif img_mode == "L":
img_depth = 1
else:
print("img_mode = %s is neither RGB or L" % img_mode)
eixt(0)
return img_width, img_height, img_depth
def json2voc(yamlPath):
    with open(yamlPath, 'r', encoding='utf-8') as f:
        cfg = f.read()
    cfgFile = yaml.safe_load(cfg)
    # create the required VOC directory layout
    tt100k_parent_dir = cfgFile['OUTPUT_DIR']
    work_space_dir = os.path.join(tt100k_parent_dir, "VOCdevkit/")
    if not os.path.isdir(work_space_dir):
        os.mkdir(work_space_dir)
    work_space_dir = os.path.join(work_space_dir, "VOC2007/")
    if not os.path.isdir(work_space_dir):
        os.mkdir(work_space_dir)
    jpeg_images_path = os.path.join(work_space_dir, 'JPEGImages')
    annotations_path = os.path.join(work_space_dir, 'Annotations')
    if not os.path.isdir(jpeg_images_path):
        os.mkdir(jpeg_images_path)
    if not os.path.isdir(annotations_path):
        os.mkdir(annotations_path)
    print('dataset conversion started')
input_labels_dir = cfgFile['INPUT_LABELS_DIR']
input_images_dir = cfgFile['INPUT_IMAGES_DIR']
for root,dirs, files in os.walk(input_labels_dir):
for f in files:
            print('converting file', f)
json_name = f.split(".")[0]
with open(input_labels_dir+json_name +'.json', 'r',encoding='utf-8') as f:
object_json = json.load(f)
            # open the corresponding image file
filename = input_images_dir + json_name + ".jpg"
width,height,depth = find_image_size(filename)
filler = xml_fill(filename, width, height, depth)
num_obj=len(object_json['objects'])
for i in range(num_obj):
obj = object_json['objects'][i]
class_name = str(obj['f_code'])
for i in range(len(obj['obj_points'])):
xmin=int(obj['obj_points'][i]['x'])
ymin=int(obj['obj_points'][i]['y'])
xmax= xmin + int(obj['obj_points'][i]['w'])
ymax= ymin + int(obj['obj_points'][i]['h'])
filler.add_obj_box(class_name, xmin, ymin, xmax, ymax)
filler.save_xml(annotations_path + '/' + json_name + '.xml')
print("%s.xml saved\n"%json_name)
return | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/json_to_voc_copy.py | json_to_voc_copy.py |
import os
import json
env_dist = os.environ
# async message storage by kafka
if 'status_provider' in env_dist:
status_kafka = env_dist['status_provider']
if status_kafka is None:
print('Kafka status_provider does not exist!')
else:
status_kafka = json.loads(status_kafka)
KAFKA_HOST = status_kafka['address'] if 'address' in status_kafka.keys() else None
KAFKA_USERNAME = status_kafka['username'] if 'username' in status_kafka.keys() else None
KAFKA_PASSWORD = status_kafka['password'] if 'password' in status_kafka.keys() else None
KAFKA_TOPIC_STOP = status_kafka['topic'] if 'topic' in status_kafka.keys() else None
# data storage in Redis, the form of redis hosts and password like follow
# hosts = '10.70.151.183:6379' or
# ['10.70.151.183:6379', '10.70.151.225:6379', '10.70.151.205:6379',
# '10.70.151.232:6379', '10.70.151.239:6379', '10.70.151.231:6379']
# password = 'xxxxxxx'
if 'src_provider' in env_dist:
src_redis = env_dist['src_provider']
src_redis = json.loads(src_redis)
SRC_REDIS_HOSTS = src_redis['address']
SRC_REDIS_PASSWORD = src_redis['password'] if 'password' in src_redis.keys() else None
SRC_REDIS_DB = src_redis['database'] if 'database' in src_redis.keys() else 0
SRC_BATCH_ID = src_redis['batch_id']
if 'dst_provider' in env_dist:
dst_redis = env_dist['dst_provider']
dst_redis = json.loads(dst_redis)
DST_REDIS_HOSTS = dst_redis['address']
DST_REDIS_PASSWORD = dst_redis['password'] if 'password' in dst_redis.keys() else None
DST_REDIS_DB = dst_redis['database'] if 'database' in dst_redis.keys() else 0
RESULT_KEY = dst_redis['result_key']
PROGRESS_KEY = dst_redis['progress_key']
if 'inference_type' in env_dist:
INFERENCE_TYPE = env_dist['inference_type']
# DEBUG FINAL VALUES
DEBUG_INFER_RLT_REDIS = {"redis_host": "10.73.1.31:32730", "password": "ai13579", "db": "3"}
DEBUG_SRC_REDIS_DICT = {"redis_host": "10.73.1.31:32730", "password": "ai13579", "db": "1"}
ENGINE_MODULE_PATH = 'sdk.inference'
ENGINE_CLASS_NAME = 'Engine'
LOAD_MODEL_METHOD_NAME = 'load_model'
INFERENCE_METHOD_NAME = 'inference'
RELEASE_GPU_RES_METHOD_NAME = 'release' | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/settings.py | settings.py |
import zipfile
import time
import os
import tarfile
import uuid
from aicloud.cloudapi import AIPlatform
import json
import sys
import io
# sys.stdout = sys.__stdout__ = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8', line_buffering=True)
# sys.stderr = sys.__stderr__ = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8', line_buffering=True)
class AlgorithmEngine(object):
def __init__(self):
self.token = os.environ.get('token')
self.auth_token = os.environ.get('authToken')
self.user_id = os.environ.get('userId')
self.train_id = os.environ.get('trainId')
self.base_url = os.environ.get('baseUrl')
self.ai_platform = AIPlatform(base_url=self.base_url, authorization=self.token, auth_token=self.auth_token)
self.nbid = os.environ.get('nbid')
def upload_model(self, source_dir, target_dir=None):
"""
upload model file to cloud storage
:param source_dir: source directory of model files
:param target_dir: target directory to zip model files
:return:
"""
target_file = target_dir + str(uuid.uuid1()) + ".tar" if target_dir else str(uuid.uuid1()) + ".tar"
with tarfile.open(target_file, "w:") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
file = open(target_file, 'rb')
code, header, body = self.ai_platform.upload_model_file(user_id=self.user_id,
train_id=self.train_id,
nbid=self.nbid,
model_file=file)
# msg = 'upload model file successfully' if code == 200 else 'upload model file failed'
        if code != 200:
msg = 'upload model file failed'
return 1, msg
if json.loads(str(body.content, encoding = "utf-8"))['code'] == '0':
msg = 'upload model file successfully'
else:
msg = json.loads(str(body.content, encoding = "utf-8"))
# delete tar package
file.close()
os.remove(target_file)
return 0, msg
def download_model(self, local_model_path):
"""
download model file from cloud storage
:param local_model_path: local storage path
:return:
"""
code, header, body = self.ai_platform.download_model_file(train_id=self.train_id)
        if code != 200:
msg = 'download model file failed'
return 1, msg
if json.loads(str(body.content, encoding = "utf-8"))['code'] == '0':
msg = 'download model file successfully'
else:
msg = json.loads(str(body.content, encoding = "utf-8"))
#msg = 'download model file successfully'
now_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
file_absolute_path = local_model_path + '/' + now_time + '.tar'
try:
with open(file_absolute_path, "wb") as f:
f.write(body.content)
tar = tarfile.open(file_absolute_path)
tar.extractall(local_model_path)
tar.close()
except Exception as ex:
return 1, ex
if os.path.exists(file_absolute_path):
os.remove(file_absolute_path)
return 0, msg
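    # Hedged usage sketch (added comment, illustrative only; assumes the token / trainId /
    # baseUrl environment variables set by the platform are already present):
    #
    #     engine = AlgorithmEngine()
    #     rc, msg = engine.upload_model("/workspace/output_model")
    #     rc, msg = engine.download_model("/workspace/restored_model")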
def create_training_task(self, name, log_path, description=None):
"""
create training task, then can find in training list
training task include name, description, version, dataset
version is generated automatically associated with project, project is from env
dataset is training dataset from env
        :param name: training task name
        :param log_path: path of the training log
        :param description: description of the task
:return:
"""
#test_url = self.base_url + '/ai/api/v1/training/training/form'
code, header, body = self.ai_platform.save_model_info(name, log_path, self.nbid)
if code is not 200:
msg = 'create training task failed'
return 1, msg
if json.loads(str(body.content, encoding = "utf-8"))['code'] == '0':
msg = 'create training successfully'
train_id = json.loads(str(body.content, encoding = "utf-8"))['result']['data']['trainingId']
with open('/workspace/trainid.txt', 'w') as file:
file.write(str(train_id))
else:
msg = json.loads(str(body.content, encoding = "utf-8"))
return 0, msg
def create_training_duration(self):
#url = self.base_url + '/ai/api/v1/training/finish/form/'
code, header, body = self.ai_platform.training_duration_info()
        if code != 200:
msg = 'create training duration failed'
return 1, msg
if json.loads(str(body.content, encoding = "utf-8"))['code'] == '0':
msg = 'create training duration successfully'
else:
msg = json.loads(str(body.content, encoding = "utf-8"))
return 0, msg | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/engine.py | engine.py |
import requests
from aicloud.cloudexception import CommonError
class Object(dict):
def __init__(self, init_dict=None, deep=False):
super(Object, self).__init__()
if isinstance(init_dict, dict):
for k, v in init_dict.items():
if deep:
if isinstance(v, dict):
if not isinstance(v, Object):
v = Object(v, deep)
elif isinstance(v, (list, tuple)):
v = [Object(i, deep) if (isinstance(i, dict) and not isinstance(i, Object)) else i for i in v]
setattr(self, k, v)
self[k] = v
def __getattr__(self, key):
pass
def __getitem__(self, key):
return getattr(self, key)
def _resp(r, no_error):
if no_error and r.status_code >= 400:
raise CommonError(code=r.status_code, message=r.content)
return r.status_code, r.headers, r
def _resp_content(r):
body = ""
try:
if r.text:
body = r.json()
except:
raise CommonError(code=r.status_code, message=r.text)
return r.status_code, r.headers, Object(body, True), r.content
def head(url, headers={}, verify=False, no_error=False, hooks=None):
r = requests.get(url, headers=headers, verify=verify, hooks=hooks)
return _resp(r, no_error)
def get(url, headers={}, verify=False, no_error=False, params=None, hooks=None):
r = requests.get(url, headers=headers, verify=verify, params=params, hooks=hooks)
return _resp(r, no_error)
def post(url, headers={}, body=None, verify=False, no_error=False, data=None, files=None, hooks=None):
r = requests.post(url, headers=headers, json=body, data=data, verify=verify, files=files, hooks=hooks)
return _resp(r, no_error)
def post_timeout(url, headers={}, body=None, verify=False, no_error=False, hooks=None):
r = requests.post(url, headers=headers, json=body, verify=verify, timeout=8, hooks=hooks)
return _resp(r, no_error)
def put(url, headers={}, body=None, verify=False, no_error=False, params=None, data=None, hooks=None):
r = requests.put(url, headers=headers, json=body, params=params, data=data, verify=verify, hooks=hooks)
return _resp(r, no_error)
def patch(url, headers={}, body=None, verify=False, no_error=False, hooks=None):
r = requests.patch(url, headers=headers, json=body, verify=verify, hooks=hooks)
return _resp(r, no_error)
def delete(url, headers={}, verify=False, no_error=False, hooks=None):
r = requests.delete(url, headers=headers, verify=verify, hooks=hooks)
return _resp(r, no_error) | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/rest.py | rest.py |
import os
from os import path
import json
import numpy as np
import pandas as pd
import glob
import cv2
import os
import yaml
import shutil
from IPython import embed
from sklearn.model_selection import train_test_split
np.random.seed(41)
class Csv2CoCo:
def __init__(self,image_dir,total_annos, classname_to_id):
self.images = []
self.annotations = []
self.categories = []
self.img_id = 0
self.ann_id = 0
self.image_dir = image_dir
self.total_annos = total_annos
self.classname_to_id = classname_to_id
def save_coco_json(self, instance, save_path):
        with open(save_path, 'w') as f:
            json.dump(instance, f, ensure_ascii=False, indent=2)  # indent=2 for readable output
    # build the COCO structure from the csv annotations
def to_coco(self, keys, classname_to_id):
self._init_categories(classname_to_id)
for key in keys:
self.images.append(self._image(key))
shapes = self.total_annos[key]
for shape in shapes:
bboxi = []
for cor in shape[:-1]:
bboxi.append(int(cor))
label = shape[-1]
# if label != '16007' or label != '16008':
# continue
annotation = self._annotation(bboxi,label, classname_to_id)
self.annotations.append(annotation)
self.ann_id += 1
self.img_id += 1
instance = {}
instance['info'] = 'spytensor created'
instance['license'] = ['license']
instance['images'] = self.images
instance['annotations'] = self.annotations
instance['categories'] = self.categories
return instance
    # build the categories field
def _init_categories(self, classname_to_id):
for k, v in classname_to_id.items():
category = {}
category['id'] = v
category['name'] = k
self.categories.append(category)
    # build the COCO image field
def _image(self, path):
image = {}
#print(self.image_dir + path)
        print('converting:', path)
img = cv2.imread(self.image_dir + path)
image['height'] = img.shape[0]
image['width'] = img.shape[1]
image['id'] = self.img_id
image['file_name'] = path
return image
    # build the COCO annotation field
def _annotation(self, shape,label, classname_to_id):
# label = shape[-1]
points = shape[:4]
annotation = {}
annotation['id'] = self.ann_id
annotation['image_id'] = self.img_id
annotation['category_id'] = int(classname_to_id[label])
annotation['segmentation'] = self._get_seg(points)
annotation['bbox'] = self._get_box(points)
annotation['iscrowd'] = 0
annotation['area'] = self._get_area(points)
return annotation
    # COCO bbox format: [x1, y1, w, h]
def _get_box(self, points):
min_x = points[0]
min_y = points[1]
max_x = points[2]
max_y = points[3]
return [min_x, min_y, max_x - min_x, max_y - min_y]
    # compute the box area
def _get_area(self, points):
min_x = points[0]
min_y = points[1]
max_x = points[2]
max_y = points[3]
return (max_x - min_x+1) * (max_y - min_y+1)
# segmentation
def _get_seg(self, points):
min_x = points[0]
min_y = points[1]
max_x = points[2]
max_y = points[3]
h = max_y - min_y
w = max_x - min_x
a = []
a.append([min_x,min_y, min_x,min_y+0.5*h, min_x,max_y, min_x+0.5*w,max_y, max_x,max_y, max_x,max_y-0.5*h, max_x,min_y, max_x-0.5*w,min_y])
return a
def json_to_csv(path, img_path):
json_list = []
    for json_file in glob.glob(path + '/*.json'):  # all matching annotation file paths
fp_json_file = json.load(open(json_file, "r"))
filename = os.path.basename(json_file)
for multi in fp_json_file["objects"]:
points = multi["obj_points"][0]
xmin = points["x"]
ymin = points["y"]
xmax = points["x"]+points["w"]-1
ymax = points["y"]+points["h"]-1
label = multi["f_code"]
            if xmax <= xmin or ymax <= ymin:
                continue  # skip degenerate boxes
value = (img_path +'/' + filename[:-5] + '.jpg',
int(xmin),
int(ymin),
int(xmax),
int(ymax),
str(label))
json_list.append(value)
column_name = ['filename', 'xmin', 'ymin', 'xmax', 'ymax', 'class']
xml_df = pd.DataFrame(json_list, columns=column_name)
return xml_df
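# Note (added comment): each row of the DataFrame returned above, and of the
# ./scratches.csv file written below, has the columns
#   filename, xmin, ymin, xmax, ymax, class
# where `class` is the f_code string from the annotation json.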
def json2coco(yamlPath):
    with open(yamlPath, 'r', encoding='utf-8') as f:
        cfg = f.read()
    cfgFile = yaml.safe_load(cfg)
img_path = cfgFile['INPUT_IMAGES_DIR']
    print('input image path:', img_path)
    output_path = cfgFile['OUTPUT_DIR']
    print('output path:', output_path)
    # print('dataset conversion started')
    xml_df = json_to_csv(cfgFile['INPUT_LABELS_DIR'], img_path)
    ## change the intermediate csv file name here if needed
xml_df.to_csv('./scratches.csv', index=None)
#print('Successfully converted xml to csv.')
    # annotation path
    label_path = cfgFile['INPUT_LABELS_DIR']
    print('input annotation path:', label_path)
    csv_file= "./scratches.csv"
    classname_to_id = cfgFile['CLASS_ID']
    print('class id mapping:', classname_to_id)
    print('******************** dataset conversion started *************************')
    # aggregate the csv-format annotations
total_csv_annotations = {}
annotations = pd.read_csv(csv_file,header=None).values
for annotation in annotations:
key = annotation[0].split(os.sep)[-1]
value = np.array([annotation[1:]])
if key in total_csv_annotations.keys():
total_csv_annotations[key] = np.concatenate((total_csv_annotations[key],value),axis=0)
else:
total_csv_annotations[key] = value
    # split the data by image key
    total_keys = list(total_csv_annotations.keys())
    train_keys, val_keys = train_test_split(total_keys[1:], test_size=0.2)  # skip the csv header row
    print('******************* train / validation split **********************')
    print("training set size:", len(train_keys), 'validation set size:', len(val_keys))
    # create the required directories
    # print('converted data is saved under the datasets directory')
if not os.path.exists(output_path + 'datasets/annotations/ark/'):
os.makedirs(output_path + 'datasets/annotations/ark/')
if not os.path.exists(output_path+ 'datasets/train/'):
os.makedirs(output_path + 'datasets/train/')
if not os.path.exists(output_path + 'datasets/test/'):
os.makedirs(output_path + 'datasets/test/')
    # convert the training set to COCO json format
    print('********************* converting training set, please wait **************************')
l2c_train = Csv2CoCo(image_dir=img_path, total_annos=total_csv_annotations, classname_to_id=classname_to_id)
train_instance = l2c_train.to_coco(train_keys, classname_to_id)
l2c_train.save_coco_json(train_instance, output_path + 'datasets/annotations/ark/instances_train2020.json')
    print('********************** training set conversion finished *******************************')
    print('*********************** converting validation set, please wait *************************')
#for file in train_keys:
# shutil.copy(img_path+file,output_path + "datasets/train/")
#for file in val_keys:
# shutil.copy(img_path+file,output_path + "datasets/test/")
    # convert the validation set to COCO json format
l2c_val = Csv2CoCo(image_dir=img_path,total_annos=total_csv_annotations, classname_to_id=classname_to_id)
val_instance = l2c_val.to_coco(val_keys, classname_to_id)
l2c_val.save_coco_json(val_instance, output_path + 'datasets/annotations/ark/instances_val2020.json')
    print('************************ validation set conversion finished ****************************')
    print('************************ copying images **************************')
for file in train_keys:
shutil.copy(img_path+file,output_path + "datasets/train/")
for file in val_keys:
shutil.copy(img_path+file,output_path + "datasets/test/")
    print('************************ dataset conversion finished ****************************') | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/json_to_coco.py | json_to_coco.py |
import logging
from logging.handlers import TimedRotatingFileHandler
from kafka import KafkaProducer, KafkaConsumer
class RedisClient(object):
def __init__(self, hosts, password=None, db=0):
redis_hosts = self.parse_redis_hosts(hosts)
        if len(redis_hosts) == 0:
            self.rclient = None
        elif len(redis_hosts) == 1:
import redis
self.rclient = redis.StrictRedis(password=password,
db=db,
**redis_hosts[0])
else:
import rediscluster
self.rclient = rediscluster.RedisCluster(password=password, startup_nodes=redis_hosts)
@staticmethod
def parse_redis_hosts(hosts):
redis_hosts = []
if isinstance(hosts, str):
hosts = hosts.split(':')
redis_hosts.append({'host': hosts[0],
'port': hosts[1]})
return redis_hosts
for host in hosts:
address = host.split(':')
redis_hosts.append({'host': address[0],
'port': address[1]})
return redis_hosts
def set(self, key, value, seconds=None):
return self.rclient.set(key, value, px=seconds)
def get(self, key):
return self.rclient.get(key)
def hkeys(self, name):
return self.rclient.hkeys(name)
def hset(self, name, key, value):
return self.rclient.hset(name, key, value)
def hget(self, name, key):
return self.rclient.hget(name, key)
def delete(self, key):
return self.rclient.delete(key)
def hmset(self, name, mapping):
return self.rclient.hmset(name, mapping)
def expire(self, key, time):
return self.rclient.expire(key, time)
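    # Hedged usage sketch (added comment, illustrative only; host and password are placeholders):
    #
    #     cache = RedisClient("10.0.0.1:6379", password="secret", db=0)
    #     cache.set("progress", 3)
    #     cache.get("progress")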
class KafkaClient(object):
def __init__(self, host, username=None, password=None):
self.producer = KafkaProducer(bootstrap_servers=host,
sasl_plain_username=username,
sasl_plain_password=password)
self.consumer = KafkaConsumer(bootstrap_servers=host,
sasl_plain_username=username,
sasl_plain_password=password)
def send(self, topic, value):
return self.producer.send(topic=topic, value=value)
def p_close(self):
return self.producer.close()
def subscribe(self, topics):
return self.consumer.subscribe(topics=topics)
def poll(self):
return self.consumer.poll()
def c_close(self):
return self.consumer.close()
def close_all(self):
self.producer.close()
self.consumer.close()
return 0
# log
def log():
handler = TimedRotatingFileHandler("inference.log", when="D", interval=2, backupCount=5,
encoding="UTF-8", delay=False, utc=True)
logging_format = logging.Formatter(
'%(asctime)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
handler.setFormatter(logging_format)
return handler | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/utils.py | utils.py |
import os
import json
import time
from PIL import Image
import numpy as np
import cv2
import yaml
def json2mapillary(yamlPath):
    with open(yamlPath, 'r', encoding='utf-8') as f:
        cfg = f.read()
    cfgFile = yaml.safe_load(cfg)
    print('*************************** dataset conversion started *****************************')
input_labels_dir = cfgFile['INPUT_LABELS_DIR']
input_images_dir = cfgFile['INPUT_IMAGES_DIR']
output_dir = cfgFile['OUTPUT_DIR']
class_type = cfgFile['CLASS_TYPE']
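    # Note (added comment): CLASS_TYPE is expected to map annotation f_code strings to the
    # integer pixel value written into the mask, e.g. {'16007': 1, '16008': 2}
    # (the codes here are placeholders, not values from a real config).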
for root,dirs, files in os.walk(input_labels_dir):
for f in files:
            print('converting file', f)
json_name = f.split(".")[0]
with open(input_labels_dir+json_name +'.json', 'r',encoding='utf-8') as f:
object_json = json.load(f)
num_obj=len(object_json['objects'])
image = Image.open(input_images_dir+json_name +'.jpg')
image = np.array(image)
image_h, image_w = image.shape[:2]
image_temp =np. zeros((image_h,image_w),dtype=np.uint8)
#print('num_obj=',num_obj)
for i in range(num_obj):
obj = object_json['objects'][i]
class_name = str(obj['f_code'])
for item in class_type:
if class_name == str(item):
label_polygon = []
for i in range(len(obj['obj_points'])):
x=int(obj['obj_points'][i]['x'])
y=int(obj['obj_points'][i]['y'])
label_xy = [x,y]
label_polygon.append(label_xy)
a = np.array(label_polygon)
cv2.fillPoly(image_temp, [a], class_type[item])
cv2.imwrite(output_dir + json_name +'.png', image_temp) | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/json_to_mapillary.py | json_to_mapillary.py |
import os
from os import path
import json
import numpy as np
import pandas as pd
import glob
import cv2
import os
import yaml
import shutil
from IPython import embed
from sklearn.model_selection import train_test_split
np.random.seed(41)
class Csv2CoCo:
def __init__(self,image_dir,total_annos):
self.images = []
self.annotations = []
self.categories = []
self.img_id = 0
self.ann_id = 0
self.image_dir = image_dir
self.total_annos = total_annos
def save_coco_json(self, instance, save_path):
        with open(save_path, 'w') as f:
            json.dump(instance, f, ensure_ascii=False, indent=2)  # indent=2 for readable output
    # build the COCO structure from the csv annotations
def to_coco(self, keys):
self._init_categories()
for key in keys:
self.images.append(self._image(key))
shapes = self.total_annos[key]
for shape in shapes:
bboxi = []
for cor in shape[:-1]:
bboxi.append(int(cor))
label = shape[-1]
# if label != '16007' or label != '16008':
# continue
annotation = self._annotation(bboxi,label)
self.annotations.append(annotation)
self.ann_id += 1
self.img_id += 1
instance = {}
instance['info'] = 'spytensor created'
instance['license'] = ['license']
instance['images'] = self.images
instance['annotations'] = self.annotations
instance['categories'] = self.categories
return instance
    # build the categories field
def _init_categories(self):
for k, v in classname_to_id.items():
category = {}
category['id'] = v
category['name'] = k
self.categories.append(category)
    # build the COCO image field
def _image(self, path):
image = {}
print(self.image_dir + path)
img = cv2.imread(self.image_dir + path)
image['height'] = img.shape[0]
image['width'] = img.shape[1]
image['id'] = self.img_id
image['file_name'] = path
return image
    # build the COCO annotation field
def _annotation(self, shape,label):
# label = shape[-1]
points = shape[:4]
annotation = {}
annotation['id'] = self.ann_id
annotation['image_id'] = self.img_id
annotation['category_id'] = int(classname_to_id[label])
annotation['segmentation'] = self._get_seg(points)
annotation['bbox'] = self._get_box(points)
annotation['iscrowd'] = 0
annotation['area'] = self._get_area(points)
return annotation
    # COCO bbox format: [x1, y1, w, h]
def _get_box(self, points):
min_x = points[0]
min_y = points[1]
max_x = points[2]
max_y = points[3]
return [min_x, min_y, max_x - min_x, max_y - min_y]
    # compute the box area
def _get_area(self, points):
min_x = points[0]
min_y = points[1]
max_x = points[2]
max_y = points[3]
return (max_x - min_x+1) * (max_y - min_y+1)
# segmentation
def _get_seg(self, points):
min_x = points[0]
min_y = points[1]
max_x = points[2]
max_y = points[3]
h = max_y - min_y
w = max_x - min_x
a = []
a.append([min_x,min_y, min_x,min_y+0.5*h, min_x,max_y, min_x+0.5*w,max_y, max_x,max_y, max_x,max_y-0.5*h, max_x,min_y, max_x-0.5*w,min_y])
return a
def json_to_csv(path):
json_list = []
    for json_file in glob.glob(path + '/*.json'):  # all matching annotation file paths
fp_json_file = json.load(open(json_file, "r"))
filename = os.path.basename(json_file)
for multi in fp_json_file["objects"]:
points = multi["obj_points"][0]
xmin = points["x"]
ymin = points["y"]
xmax = points["x"]+points["w"]-1
ymax = points["y"]+points["h"]-1
label = multi["f_code"]
            # skip degenerate boxes
            if xmax <= xmin or ymax <= ymin:
                continue
value = (img_path +'/' + filename[:-5] + '.jpg',
int(xmin),
int(ymin),
int(xmax),
int(ymax),
str(label[0:2]))
json_list.append(value)
column_name = ['filename', 'xmin', 'ymin', 'xmax', 'ymax', 'class']
xml_df = pd.DataFrame(json_list, columns=column_name)
return xml_df
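# The conversion entry point below reads a small YAML configuration file. A minimal
# sketch of its expected layout (the key names come from the code below; the values
# are illustrative placeholders only):
#
#   INPUT_IMAGES_DIR: /data/images/
#   INPUT_LABELS_DIR: /data/labels/
#   OUTPUT_DIR: /data/output/
#   CLASS_ID:          # maps label prefix to COCO category id
#     '16': 1
#     '17': 2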
def json2coco(yamlPath):
    # img_path and classname_to_id are read as module-level globals by
    # json_to_csv and Csv2CoCo, so declare them global here
    global img_path, classname_to_id
    f = open(yamlPath, 'r', encoding='utf-8')
    cfg = f.read()
    cfgFile = yaml.safe_load(cfg)
    img_path = cfgFile['INPUT_IMAGES_DIR']
    output_path = cfgFile['OUTPUT_DIR']
    #print('output_path = ',output_path )
    print('Starting dataset conversion')
    xml_df = json_to_csv(cfgFile['INPUT_LABELS_DIR'])
    # output csv file name
    xml_df.to_csv('./scratches.csv', index=None)
    print('Successfully converted json to csv.')
    # annotation path
    label_path = cfgFile['INPUT_LABELS_DIR']
    csv_file = "./scratches.csv"
    classname_to_id = cfgFile['CLASS_ID']
    # Aggregate the csv annotations
total_csv_annotations = {}
annotations = pd.read_csv(csv_file,header=None).values
for annotation in annotations:
key = annotation[0].split(os.sep)[-1]
value = np.array([annotation[1:]])
if key in total_csv_annotations.keys():
total_csv_annotations[key] = np.concatenate((total_csv_annotations[key],value),axis=0)
else:
total_csv_annotations[key] = value
    # Split the data by key
    total_keys = list(total_csv_annotations.keys())
    train_keys, val_keys = train_test_split(total_keys[1:], test_size=0.2)  # skip the csv header row
print("train_n:", len(train_keys), 'val_n:', len(val_keys))
    # Create the required directories
if not os.path.exists(output_path + 'datasets/annotations/ark/'):
os.makedirs(output_path + 'datasets/annotations/ark/')
if not os.path.exists(output_path+ 'datasets/train/'):
os.makedirs(output_path + 'datasets/train/')
if not os.path.exists(output_path + 'datasets/test/'):
os.makedirs(output_path + 'datasets/test/')
    # Convert the training set to COCO json format
l2c_train = Csv2CoCo(image_dir=img_path, total_annos=total_csv_annotations)
train_instance = l2c_train.to_coco(train_keys)
l2c_train.save_coco_json(train_instance, output_path + 'datasets/annotations/ark/instances_train2020.json')
for file in train_keys:
shutil.copy(img_path+file,output_path + "datasets/train/")
for file in val_keys:
shutil.copy(img_path+file,output_path + "datasets/test/")
    # Convert the validation set to COCO json format
l2c_val = Csv2CoCo(image_dir=img_path,total_annos=total_csv_annotations)
val_instance = l2c_val.to_coco(val_keys)
    l2c_val.save_coco_json(val_instance, output_path + 'datasets/annotations/ark/instances_val2020.json') | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/datatransform/coco/json2coco.py | json2coco.py
import json
import os,sys,shutil
from PIL import Image
from voc_xml_generator import xml_fill
import yaml
def find_image_size(filename):
with Image.open(filename) as img:
img_width = img.size[0]
img_height = img.size[1]
img_mode = img.mode
if img_mode == "RGB":
img_depth = 3
elif img_mode == "RGBA":
img_depth = 3
elif img_mode == "L":
img_depth = 1
else:
print("img_mode = %s is neither RGB or L" % img_mode)
eixt(0)
return img_width, img_height, img_depth
def json2voc(yamlPath):
    f = open(yamlPath, 'r', encoding='utf-8')
    cfg = f.read()
    cfgFile = yaml.safe_load(cfg)
    # Create the required directories
tt100k_parent_dir = cfgFile['OUTPUT_DIR']
work_sapce_dir = os.path.join(tt100k_parent_dir, "VOCdevkit/")
if not os.path.isdir(work_sapce_dir):
os.mkdir(work_sapce_dir)
work_sapce_dir = os.path.join(work_sapce_dir, "VOC2007/")
if not os.path.isdir(work_sapce_dir):
os.mkdir(work_sapce_dir)
jpeg_images_path = os.path.join(work_sapce_dir, 'JPEGImages')
annotations_path = os.path.join(work_sapce_dir, 'Annotations')
if not os.path.isdir(jpeg_images_path):
os.mkdir(jpeg_images_path)
if not os.path.isdir(annotations_path):
os.mkdir(annotations_path)
    print('Starting dataset conversion')
input_labels_dir = cfgFile['INPUT_LABELS_DIR']
input_images_dir = cfgFile['INPUT_IMAGES_DIR']
for root,dirs, files in os.walk(input_labels_dir):
for f in files:
            print('Converting file', f)
json_name = f.split(".")[0]
with open(input_labels_dir+json_name +'.json', 'r',encoding='utf-8') as f:
object_json = json.load(f)
                # open the corresponding image file
filename = input_images_dir + json_name + ".jpg"
width,height,depth = find_image_size(filename)
filler = xml_fill(filename, width, height, depth)
num_obj=len(object_json['objects'])
for i in range(num_obj):
obj = object_json['objects'][i]
class_name = str(obj['f_code'])
for i in range(len(obj['obj_points'])):
xmin=int(obj['obj_points'][i]['x'])
ymin=int(obj['obj_points'][i]['y'])
xmax= xmin + int(obj['obj_points'][i]['w'])
ymax= ymin + int(obj['obj_points'][i]['h'])
filler.add_obj_box(class_name, xmin, ymin, xmax, ymax)
filler.save_xml(annotations_path + '/' + json_name + '.xml')
print("%s.xml saved\n"%json_name) | AICloudSDK | /AICloudSDK-1.0.7.tar.gz/AICloudSDK-1.0.7/aicloud/datatransform/voc/json2voc.py | json2voc.py |
import json
import logging
import os
import re
from typing import Union, Tuple
import argparse
from overrides import overrides
from pyhocon import ConfigFactory, ConfigTree
class ConfigReader:
"""
Reads and parses the config under the given config.
Given a default config merges it with the given config (useful when
you have a default application config).
"""
def __init__(self, path='', default=''):
"""
Initializes the reader.
Accepts a config config and a default config config (or resource).
:param path: Path to the config.
:param default: fallback config (path, resource or string)
"""
try:
# is resource
default = default.decode("utf-8")
except AttributeError:
try:
# is path
default = ConfigFactory.parse_file(default)
except (FileNotFoundError, TypeError, OSError):
# let's assume it's just a string (for now)
pass
if not path and not default:
raise ValueError("Either config or fallback must be specified!")
if path:
try:
self.config = ConfigFactory.parse_file(path)
except FileNotFoundError:
logging.getLogger(__name__).warning(
"{} doesn't exist! will load default only! {}".format(
path, os.getcwd())
)
self.config = ""
else:
self.config = ""
if isinstance(default, str):
default: ConfigTree = ConfigFactory.parse_string(default)
if isinstance(self.config, str):
self.config: ConfigTree = ConfigFactory.parse_string(self.config)
self.default: ConfigTree = default
def read_config(self):
"""
Reads the config and returns a parsed mapping between config
keys and (parsed) values.
If no config is found under config, loads the default config only.
:return: Mapping from config entries to their parsed values.
"""
if not self.config:
return self.default
# cfg_str = '{}\n{}'.format(self.default, self.config)
if not self.default:
return self.config
if self.config and self.default:
return ConfigTree.merge_configs(self.default, self.config)
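# Illustrative usage sketch (not part of the original module; the file name
# 'app.conf' and the HOCON snippet below are hypothetical placeholders):
#
#     reader = ConfigReader(path='app.conf', default='db { host = localhost, port = 5432 }')
#     conf = reader.read_config()      # entries in app.conf override the defaults
#     host = conf.get('db.host')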
class ConfArgumentParser(argparse.ArgumentParser):
int_only = re.compile(r'[-+]?([\d]+)$')
int_or_float = numeric_const_pattern = re.compile(
r"[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?")
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
parents=None,
formatter_class=argparse.HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True,
allow_abbrev=True,
default_config=None):
super().__init__(prog, usage, description, epilog, parents or [],
formatter_class, prefix_chars,
fromfile_prefix_chars, argument_default,
conflict_handler, add_help, allow_abbrev)
self.default_config = default_config
self._args = None
self._conf = None
@property
def args(self):
return self._args
@property
def conf(self):
return self._conf
def parse_string(self,
k: str,
v: str) -> Tuple[str, Union[bool, str, int, float]]:
if v in ["yes", "true"]:
return k, True
if v in ['no', 'false']:
return k, False
if re.match(ConfArgumentParser.int_only, v):
return k, int(v)
if re.match(ConfArgumentParser.int_or_float, v):
return k, float(v)
return k, v
@overrides
def parse_args(self, args=None, namespace=None):
args = super().parse_args(args, namespace)
self._conf = ConfigReader(args.config,
default=self.default_config).read_config()
self._args = args
if args.c:
for cs in args.c:
for c in cs:
self._conf = ConfigTree.merge_configs(
self._conf,
ConfigFactory.parse_string(c)
)
return args
def get_arg_parser(description, default_config) -> ConfArgumentParser:
parser = ConfArgumentParser(description=description,
default_config=default_config)
parser.add_argument("config", metavar="config", type=str)
parser.add_argument('-c', action='append', nargs='+')
return parser
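# Illustrative usage sketch (DEFAULT_CONFIG, 'app.conf' and the -c override below
# are hypothetical placeholders):
#
#     parser = get_arg_parser("my tool", DEFAULT_CONFIG)
#     args = parser.parse_args(["app.conf", "-c", "db.port=5433"])
#     conf = parser.conf               # merged config with the -c override applied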
def format_config(config) -> str:
"""
Formats the config in a more-or-less readable way.
:class:`ConfigReader`
:meth:`ConfigReader.read_config`
:param config: Config to be formatted.
:return: Formatted config.
"""
return json.dumps(config.as_plain_ordered_dict(), indent=4)
def fqcn(cls):
return '.'.join((cls.__class__.__module__, cls.__class__.__name__)) | AIConf | /AIConf-1.1.7-py3-none-any.whl/aiconf/aiconf.py | aiconf.py |
import logging
import logging.config
from collections.abc import Iterable, Mapping
from copy import deepcopy
from typing import List, Union, Type, TypeVar
from pyhocon import ConfigTree
from aiconf.aiconf import format_config
from aiconf.exceptions import MalformattedConfigException
U = TypeVar("U")
def load_class(class_string: str,
restrict_to: Union[Type[U], List[Type[U]]] = None,
relative_import: str = ""):
try:
module_name, cls_name = class_string.rsplit(".", 1)
except ValueError:
cls_name = class_string
module_name = ""
if relative_import:
x = ".".join((relative_import, module_name)).rstrip(".")
try:
mod = __import__(x, fromlist=cls_name)
except ModuleNotFoundError:
mod = __import__(module_name, fromlist=cls_name)
else:
mod = __import__(module_name, fromlist=cls_name)
cls = getattr(mod, cls_name)
if restrict_to:
check_subclass(cls, restrict_to)
return cls
def get_all_subclasses(cls):
"""
Returns all (currently imported) subclasses of a given class.
:param cls: Class to get subclasses of.
:return: all currently imported subclasses.
"""
return set(cls.__subclasses__()).union(
s for c in cls.__subclasses__() for s in get_all_subclasses(c))
def check_subclass(cls, restrict_to):
"""
Performs a check whether a given class is a subclass of given class(es).
:raises: :class:`ValueError` if class is not subclass of given class(es).
:param cls: Class to check.
:param restrict_to: Class(es) to check whether the given class is subclass of.
"""
if not restrict_to:
return
if not isinstance(restrict_to, Iterable) and not isinstance(restrict_to,
str):
restrict_to = [restrict_to]
if not any(cls == target_cls or cls in get_all_subclasses(target_cls) for
target_cls in restrict_to):
raise ValueError(
"{} is not subclass of any of {}".format(cls, restrict_to))
def get_constructor_and_args(config):
# class only
if not isinstance(config, ConfigTree) and not isinstance(config, dict):
return config, dict()
# who knows whether we're going to reuse the config one day...
config = deepcopy(config)
# new style factory method
if "()" in config:
cls = config.pop('()')
return cls, config
# old style
elif "class" in config and 'args' in config:
return config["class"], config["args"]
# new style class
elif "class" in config and 'args' not in config:
cls = config.pop('class')
return cls, config
# wrong style
else:
raise MalformattedConfigException(
"The config must be either provided in the format {{class: cls; args: {{}} }}"
"or {{class/(): cls; arg1:val1; arg2:val2;}} but was {}.".format(
format_config(config))
)
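# Illustrative HOCON sketches of the accepted config layouts (the class names and
# keys are hypothetical placeholders, not part of this package):
#
#   old style:      handler { class = mypkg.Handler, args = { level = 10 } }
#   new style:      handler { class = mypkg.Handler, level = 10 }
#   factory style:  handler { "()" = mypkg.handler_factory, level = 10 }
#
# construct_from_config(conf['handler']) then imports the named callable and invokes
# it with the remaining keys as keyword arguments.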
def safe_construct(config_or_instance: Union[ConfigTree, U],
restrict_to: Union[Type[U], List[Type[U]]] = None,
relative_import: str = "",
perform_subclass_check=True, **kwargs) -> U:
"""
Helper function to combine load_class and construct.
Useful when an argument could be either already an instance or a
config describing how to set up the instance.
:param perform_subclass_check: Whether to perform the subclass check
even if the given argument is an instance already.
:param relative_import: String to prepend to the absolute import.
:param restrict_to: Class to be constructed must be of this type(s).
:param config_or_instance: Config to construct from or the actual
instance.
:param kwargs: Keyword args to be passed to constructor
:return: Parsed class if possible.
"""
if isinstance(config_or_instance, ConfigTree):
return construct_from_config(config_or_instance, restrict_to,
relative_import, **kwargs)
if perform_subclass_check:
check_subclass(config_or_instance.__class__, restrict_to)
return config_or_instance
def construct_from_config(config: ConfigTree,
restrict_to: Union[Type[U], List[Type[U]]] = None,
relative_import: str = "", **kwargs) -> U:
"""
Helper function to combine load_class and construct.
Config layout supposed to be like this:
``{class_string: args_as_cfg}``
where args_as_cfg are either dict, list or a single argument
:param relative_import: String to prepend to the absolute import.
:param restrict_to: Class to be constructed must be of this type(s).
:param config: Config to construct from.
:return: Parsed class if possible.
"""
logger = logging.getLogger(__name__)
cls_name, construct_args = get_constructor_and_args(config)
logger.debug("Loading '{}' from config...".format(cls_name))
cls = load_class(cls_name, restrict_to=restrict_to,
relative_import=relative_import)
return construct(cls, construct_args, **kwargs)
def construct(cls: Type[U], args_as_cfg: ConfigTree, cascading=True,
**kwargs) -> U:
"""
Default factory method for a given class and given arguments.
Construct the class with given arguments. Arguments parameter can be
a single argument, a List.
:param cls: Given arguments
:param args_as_cfg:
:param cascading: Whether to progressively construct nesting classes.
:return:
"""
logger = logging.getLogger(__name__)
if isinstance(args_as_cfg, Mapping):
args = deepcopy(args_as_cfg)
for name, value in args.items():
if isinstance(value, ConfigTree) and cascading:
args[name] = construct_from_config(value)
for k, v in kwargs.items():
args.put(k, v)
logger.debug(
"Constructing {} with keyword arguments {}".format(cls.__name__,
args))
return cls(**args)
if isinstance(args_as_cfg, Iterable) and not isinstance(args_as_cfg, str):
logger.debug("Constructing {} with arguments...".format(cls.__name__))
return cls(*args_as_cfg)
else:
# one arg only
logger.debug(
"Constructing {} with one single argument".format(cls.__name__))
return cls(args_as_cfg) | AIConf | /AIConf-1.1.7-py3-none-any.whl/aiconf/construct.py | construct.py |
# [AIDK](https://laughing-waddle-b1e4ead5.pages.github.io/html/)
AI Democratization Kit (AIDK) is a set of kits for E2E AI democratization on CPU. It is a pipeline framework that streamlines AI democratization technologies in each stage of the E2E AI pipeline, including data processing, feature engineering, training, hyper-parameter tuning, and inference. AIDK delivers high performance, lightweight models efficiently on commodity hardware.
# Introduction
A modern end-to-end machine learning or deep learning system usually covers a lifecycle of data processing, feature engineering, training, inference and serving. Each stage has its own challenges. For example, the data to be processed might be huge, so data ETL and pre-processing often take far more time than training. In the feature engineering phase, numerous sets of new features usually need to be created and then tested for effectiveness. For model training, one entry barrier is that models can be quite complex and usually require a lot of expertise to design, understand, tune and deploy. What makes things worse is that the ETL, feature engineering, training and evaluation experiments usually need to be repeated many times on many models to get the best model, which requires a significant amount of computational resources and is time-consuming.
# End-to-End AI democratization
One approach to resolving those challenges is AI democratization, which tries to make AI accessible & affordable to every organization and every data scientist. There are a lot of things to be democratized, including:
1. Data accessibility & quality - make data access easier and simpler; build a data platform to democratize data management and simplify data ingestion, exploration, processing and visualization.
2. Storage and compute platforms - instead of running AI on high-cost GPUs, run it on democratized commodity hardware with auto-scaling.
3. Algorithms - democratize the use, development and sharing of ML & DL algorithms; reduce the entry barrier with automatic model searching (AutoML).
4. Model development - select the most suitable models for users; democratize end-to-end model development.
5. Marketplace - simplify how you access, use, exchange and monetize data, algorithms, models, and outcomes.
# AIDK
AIDK is the project to democratize E2E AI on CPU. The strategy is to bring E2E AI to the existing CPU installation base with good-enough performance and zero additional cost; it increases the AI portion running on CPU and drives a better CPU/GPU balance in E2E AI. The core components of AIDK are the model advisor and the model accelerator. The model advisor provides built-in intelligence to generate parameterized models, while the model accelerator leverages train-free NAS to generate domain-specific models. As a start, AIDK democratized recommender systems on distributed CPU clusters through optimizations in each phase of the machine learning life cycle.
# Architecture
The figure below shows the architecture diagram of AIDK.

# Major Components
Here are the major components of Bluewhale:
1. RecDP - a scalable data processing and feature engineering kit based on Spark and Modin.
2. Distributed data connector - a distributed data connector based on PetaStorm that supports training frameworks in loading data from distributed filesystems, and provides enhanced capabilities like data caching and sharing.
3. Smart Democratization Advisor - a human-intelligence-enhanced toolkit that generates SigOpt recipes for SigOpt AutoML. It first generates optimized SigOpt recipes based on user choices and built-in intelligence, covering optimized model parameters, optimized training framework parameters, the training cluster environment and the final target metrics. Then it kicks off SigOpt AutoML optimization experiments to generate the best model.
4. Network Accelerator - a train-free NAS based component that generates domain-specific compact networks.
# In-Stock-Models
Currently four recommender system workloads are supported: DLRM, DIEN, WnD and RecSys. The Bluewhale E2E AI democratization kit significantly improved the performance of those models on distributed CPU clusters, reducing the CPU-to-GPU performance gap from 100x to < 2x, using the same dataset and the same AUC metrics.
# Performance
# How To Use
[QuickStart](docs/source/quickstart.rst)
[Create New Advisor](docs/source/advanced.rst)
# How to Contribute
[Documenting Style](docs/source/documentingstyle.rst)
[Coding Style](docs/source/codingstyle.rst)
| AIDK | /AIDK-0.0.1.tar.gz/AIDK-0.0.1/README.md | README.md |
import os
import sys
from shutil import copyfile
from xml.dom.minidom import parse
from xml.dom.minidom import parseString
import xml.etree.ElementTree as ET
import TestObjects
import mvn
from pom_file import Pom
from jcov_tracer import JcovTracer
from jcov_parser import JcovParser
from junitparser.junitparser import Error, Failure
import tempfile
class TestResult(object):
def __init__(self, junit_test):
self.junit_test = junit_test
self.classname = junit_test.classname
self.name = junit_test.name
self.full_name = "{classname}.{name}".format(classname=self.classname, name=self.name).lower()
result = 'pass'
if type(junit_test.result) is Error:
result = 'error'
if type(junit_test.result) is Failure:
result = 'failure'
self.outcome = result
def __repr__(self):
return "{full_name}: {outcome}".format(full_name=self.full_name, outcome=self.outcome)
def is_passed(self):
return self.outcome == 'pass'
def get_observation(self):
return 0 if self.is_passed() else 1
def as_dict(self):
return {'_tast_name': self.full_name, '_outcome': self.outcome}
class Repo(object):
def __init__(self, repo_dir):
self._repo_dir = repo_dir
self.DEFAULT_ES_VERSION = '1.0.6'
self.DEFAULT_SUREFIRE_VERSION = '2.17'
self.DEFAULT_JUNIT_VERSION = '4.12'
@property
def repo_dir(self):
return self._repo_dir
# Executes mvn test
def install(self, module=None, testcases=[], time_limit=sys.maxint, debug=False):
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
install_cmd = self.generate_mvn_install_cmd(module=inspected_module, testcases=testcases, debug=debug)
build_report = mvn.wrap_mvn_cmd(install_cmd, time_limit=time_limit, dir=self._repo_dir)
return build_report
# Executes mvn test
def test(self, module=None, tests=[], time_limit=sys.maxint):
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
test_cmd = self.generate_mvn_test_cmd(module=inspected_module, tests=tests)
build_report = mvn.wrap_mvn_cmd(test_cmd, time_limit=time_limit)
return build_report
# Generates tests. As for now implemented with evosuite
def generate_tests(self, module=None, classes=[], time_limit=sys.maxint):
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
if not self.tests_generator_setup(inspected_module):
self.setup_tests_generator(inspected_module)
test_cmd = self.generate_mvn_generate_tests_cmd(module=inspected_module, classes=classes)
build_report = mvn.wrap_mvn_cmd(test_cmd, time_limit=time_limit)
if os.path.exists(os.path.join(self.repo_dir, 'cutsFile.txt')):
os.remove(os.path.join(self.repo_dir, 'cutsFile.txt'))
return build_report
# Executes mvn clean
def clean(self, module=None):
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
test_cmd = self.generate_mvn_clean_cmd(inspected_module)
build_report = mvn.wrap_mvn_cmd(test_cmd)
return build_report
# Executes mvn compile
def test_compile(self, module=None):
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
test_cmd = self.generate_mvn_test_compile_cmd(inspected_module)
build_report = mvn.wrap_mvn_cmd(test_cmd)
return build_report
def get_test_results(self):
from junitparser import JUnitXml
from junitparser.junitparser import Error, Failure
SURFIRE_DIR_NAME = 'surefire-reports'
def get_surefire_files():
surefire_files = []
for root, _, files in os.walk(self._repo_dir):
for name in files:
if name.endswith('.xml') and os.path.basename(root) == SURFIRE_DIR_NAME:
surefire_files.append(os.path.join(root, name))
return surefire_files
class Test(object):
def __init__(self, junit_test):
self.junit_test = junit_test
self.classname = junit_test.classname
self.name = junit_test.name
self.full_name = "{classname}@{name}".format(classname=self.classname, name=self.name).lower()
result = 'pass'
if type(junit_test.result) is Error:
result = 'error'
if type(junit_test.result) is Failure:
result = 'failure'
self.outcome = result
def __repr__(self):
return "{full_name}: {outcome}".format(full_name=self.full_name, outcome=self.outcome)
def is_passed(self):
return self.outcome == 'pass'
def get_observation(self):
return 0 if self.is_passed() else 1
def as_dict(self):
return {'_tast_name': self.full_name, '_outcome': self.outcome}
outcomes = {}
for report in get_surefire_files():
try:
for case in JUnitXml.fromfile(report):
test = Test(case)
outcomes[test.full_name] = test
except:
pass
return outcomes
    # Returns the test report objects that currently exist in this repo under path_to_reports
def parse_tests_reports(self, path_to_reports, module=None):
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
ans = []
for filename in os.listdir(path_to_reports):
if filename.endswith(".xml"):
ans.append(TestObjects.TestClassReport(os.path.join(path_to_reports, filename), inspected_module))
return ans
    # Gets path to test objects in the repo, or in a specific module if specified
def get_tests(self, module=None):
ans = []
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
if os.path.isdir(inspected_module + '\\src\\test'):
if os.path.isdir(inspected_module + '\\src\\test\\java'):
ans.extend(mvn.parse_tests(inspected_module + '\\src\\test\\java'))
else:
ans.extend(mvn.parse_tests(inspected_module + '\\src\\test'))
for filename in os.listdir(inspected_module):
file_abs_path = os.path.join(inspected_module, filename)
if os.path.isdir(file_abs_path):
if not (filename == 'src' or filename == '.git'):
ans.extend(self.get_tests(file_abs_path))
return ans
    # Gets all the reports in the given module if given, else in the whole repo
def get_tests_reports(self, module=None):
ans = []
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
path_to_reports = os.path.join(inspected_module, 'target\\surefire-reports')
if os.path.isdir(path_to_reports):
ans.extend(self.parse_tests_reports(path_to_reports, inspected_module))
for filename in os.listdir(inspected_module):
file_abs_path = os.path.join(inspected_module, filename)
if os.path.isdir(file_abs_path):
if not (filename == 'src' or filename == '.git'):
ans.extend(self.get_tests_reports(file_abs_path))
return ans
    # Adds the Tracer agent to surefire. Output of the tracer goes to target
def setup_tracer(self, target=None):
agent_path_src = os.path.join(mvn.tracer_dir, r'target\uber-tracer-1.0.1-SNAPSHOT.jar')
if not os.path.isfile(agent_path_src):
os.system('mvn install -f {}'.format(mvn.tracer_dir))
agent_path_dst = os.path.join(self.repo_dir, 'agent.jar')
paths_path = os.path.join(self.repo_dir, 'paths.txt')
copyfile(agent_path_src, agent_path_dst)
with open(paths_path, 'w+') as paths:
paths.write(Repo.get_mvn_repo() + '\n')
paths.write(self.repo_dir)
self.add_argline_to_surefire('-javaagent:{}={}'.format(agent_path_dst, paths_path))
@staticmethod
def get_mvn_repo():
return os.path.join(os.environ['USERPROFILE'], '.m2\\repository')
def setup_jcov_tracer(self, path_to_classes_file=None, path_to_out_template=None, target_dir=None, class_path=None,
instrument_only_methods=True):
result_file = "result.xml"
if target_dir:
result_file = os.path.join(target_dir, result_file)
jcov = JcovTracer(self.repo_dir, path_to_out_template, path_to_classes_file, result_file, class_path=class_path,
instrument_only_methods=instrument_only_methods)
for pom_file in self.get_all_pom_paths(self._repo_dir):
pom = Pom(pom_file)
for value in jcov.get_values_to_add():
pom.add_pom_value(value)
return jcov
def run_under_jcov(self, target_dir, debug=False, instrument_only_methods=True):
self.test_compile()
f, path_to_classes_file = tempfile.mkstemp()
os.close(f)
f, path_to_template = tempfile.mkstemp()
os.close(f)
os.remove(path_to_template)
jcov = self.setup_jcov_tracer(path_to_classes_file, path_to_template, target_dir=target_dir,
class_path=Repo.get_mvn_repo(), instrument_only_methods=instrument_only_methods)
jcov.execute_jcov_process()
self.install(debug=debug)
jcov.stop_grabber()
os.remove(path_to_classes_file)
os.remove(path_to_template)
return JcovParser(target_dir).parse()
    # Collects all the pom file paths in a module recursively
def get_all_pom_paths(self, module=None):
ans = []
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
if os.path.isfile(os.path.join(inspected_module, 'pom.xml')):
ans.append(os.path.join(inspected_module, 'pom.xml'))
for file in os.listdir(inspected_module):
full_path = os.path.join(inspected_module, file)
if os.path.isdir(full_path):
ans.extend(self.get_all_pom_paths(full_path))
return ans
# Changes surefire version in a pom
def change_surefire_ver(self, version, module=None):
ans = []
inspected_module = self.repo_dir
if not module == None:
inspected_module = module
poms = self.get_all_pom_paths(inspected_module)
new_file_lines = []
for pom in poms:
xmlFile = parse(pom)
tmp_build_list = xmlFile.getElementsByTagName('build')
build_list = list(
filter(lambda b: not b.parentNode == None and b.parentNode.localName == 'project', tmp_build_list))
if len(build_list) == 0:
continue
assert len(build_list) == 1
plugins_tags = build_list[0].getElementsByTagName('plugins')
if len(plugins_tags) == 0:
continue
for plugins_tag in plugins_tags:
if plugins_tag.parentNode.localName == 'build':
artifacts_ids = list(
map(lambda a: str(a.firstChild.data), plugins_tag.getElementsByTagName('artifactId')))
if not any(a_id == 'maven-surefire-plugin' for a_id in artifacts_ids):
new_plugin = plugins_tag.ownerDocument.createElement(tagName='plugin')
new_group_id = new_plugin.ownerDocument.createElement(tagName='groupId')
new_artifact_id = new_plugin.ownerDocument.createElement(tagName='artifactId')
new_group_id_text = new_group_id.ownerDocument.createTextNode('org.apache.maven.plugins')
new_artifact_id_text = new_artifact_id.ownerDocument.createTextNode('maven-surefire-plugin')
new_group_id.appendChild(new_group_id_text)
new_plugin.appendChild(new_group_id)
new_artifact_id.appendChild(new_artifact_id_text)
new_plugin.appendChild(new_artifact_id)
plugins_tag.appendChild(new_plugin)
for plugins_tag in plugins_tags:
mvn.change_plugin_version_if_exists(plugins_tag, 'maven-surefire-plugin', version)
os.remove(pom)
with open(pom, 'w+') as f:
tmp_str = xmlFile.toprettyxml()
copy_tmp_str = ''
for char in tmp_str[::]:
if 125 < ord(char):
copy_tmp_str += 'X'
else:
copy_tmp_str += char
lines = list(filter(lambda line: len(line) > 0, copy_tmp_str.split('\n')))
for line in lines:
if not (all(c == ' ' for c in line) or all(c == '\t' for c in line)):
f.write(line + '\n')
# Changes surefire version in a pom
def add_argline_to_surefire(self, content):
inspected_module = self.repo_dir
poms = self.get_all_pom_paths(inspected_module)
for pom in poms:
xmlFile = parse(pom)
tmp_build_list = xmlFile.getElementsByTagName('build')
build_list = list(
filter(lambda b: not b.parentNode == None and b.parentNode.localName == 'project', tmp_build_list))
if len(build_list) == 0:
continue
assert len(build_list) == 1
plugins_tags = build_list[0].getElementsByTagName('plugins')
if len(plugins_tags) == 0:
continue
for plugins_tag in plugins_tags:
if plugins_tag.parentNode.localName == 'build':
artifacts_ids = list(
map(lambda a: str(a.firstChild.data), plugins_tag.getElementsByTagName('artifactId')))
if not any(a_id == 'maven-surefire-plugin' for a_id in artifacts_ids):
new_plugin = plugins_tag.ownerDocument.createElement(tagName='plugin')
new_group_id = new_plugin.ownerDocument.createElement(tagName='groupId')
new_artifact_id = new_plugin.ownerDocument.createElement(tagName='artifactId')
new_group_id_text = new_group_id.ownerDocument.createTextNode('org.apache.maven.plugins')
new_artifact_id_text = new_artifact_id.ownerDocument.createTextNode('maven-surefire-plugin')
new_group_id.appendChild(new_group_id_text)
new_plugin.appendChild(new_group_id)
new_artifact_id.appendChild(new_artifact_id_text)
new_plugin.appendChild(new_artifact_id)
plugins_tag.appendChild(new_plugin)
for plugins_tag in plugins_tags:
mvn.add_plugin_configuration_argline(plugins_tag, 'maven-surefire-plugin', content=content)
os.remove(pom)
with open(pom, 'w+') as f:
tmp_str = xmlFile.toprettyxml()
copy_tmp_str = ''
for char in tmp_str[::]:
if 125 < ord(char):
copy_tmp_str += 'X'
else:
copy_tmp_str += char
lines = list(filter(lambda line: len(line) > 0, copy_tmp_str.split('\n')))
for line in lines:
if not (all(c == ' ' for c in line) or all(c == '\t' for c in line)):
f.write(line + '\n')
def add_element_to_pom(self, pom_path, path, path_filter, element_name, element_value, add_new_element=True):
"""
add element to pom file, used to modify the surefire plugin
:param pom_path: the path to the pom.xml to modify
:param path: the path to the element in the pom.xml (list of strings)
:param element_name: name of the element to add
:param element_value: the value to add
:param add_new_element: if True add new element, else append to existing element
"""
        # use the module-level "ET" alias (the bare "xml" package is not imported here)
        ET.register_namespace('', "http://maven.apache.org/POM/4.0.0")
        ET.register_namespace('xsi', "http://www.w3.org/2001/XMLSchema-instance")
        def get_children_by_name(element, name):
            return filter(lambda e: e.tag.endswith(name), element.getchildren())
        def get_or_create_child(element, name):
            child = get_children_by_name(element, name)
            if len(child) == 0:
                return ET.SubElement(element, name)
            else:
                return child[0]
        et = ET.parse(pom_path)
path = ['build', 'plugins', 'plugin']
elements = et.getroot()
for name in path:
elements = reduce(list.__add__, map(lambda elem: get_children_by_name(elem, name), elements), [])
surfire_plugins = filter(lambda plugin: filter(lambda x: x.text == "maven-surefire-plugin",
get_children_by_name(plugin, "artifactId")),
filter(lambda e: e.tag.endswith('plugin'), et.getroot().iter()))
pass
def run_function_on_poms_by_filter(self, pom_filter, function, *args, **kwargs):
map(lambda pom: function(pom, *args, **kwargs), filter(pom_filter, self.get_all_pom_paths(self._repo_dir)))
    # Returns mvn command string that runs the given tests in the given module
def generate_mvn_test_cmd(self, tests, module=None):
mvn_names = list(map(lambda t: t.mvn_name, tests))
if module == None or module == self.repo_dir:
ans = 'mvn test -fn'
else:
ans = 'mvn -pl :{} -am test -fn'.format(
os.path.basename(module))
# ans = 'mvn test surefire:test -DfailIfNoTests=false -Dmaven.test.failure.ignore=true -Dtest='
ans += ' -DfailIfNoTests=false'
if len(mvn_names) > 0:
ans += ' -Dtest='
for mvn_name in mvn_names:
if not ans.endswith('='):
ans += ','
ans += mvn_name
ans += ' -f ' + self.repo_dir
return ans
# Returns mvn command string that generates tests for the given module
def generate_mvn_generate_tests_cmd(self, classes, module=None):
if module == None or module == self.repo_dir:
ans = 'mvn evosuite:generate evosuite:export -fn'
else:
ans = 'mvn -pl :{} -am evosuite:generate -fn'.format(
os.path.basename(module))
if len(classes) > 0:
path_to_cutsfile = os.path.join(self.repo_dir,"cutsFile.txt")
with open(path_to_cutsfile , "w+") as tmp_file:
tmp_file.write(' ,'.join(classes))
ans += ' '
ans += ' -DcutsFile="{}"'.format(path_to_cutsfile)
ans += ' -f ' + self.repo_dir
return ans
    # Returns mvn command string that runs the given testcases in the given module
def generate_mvn_install_cmd(self, testcases, module=None, debug=False):
testclasses = []
for testcase in testcases:
if not testcase.parent in testclasses:
testclasses.append(testcase.parent)
if module == None or module == self.repo_dir:
ans = 'mvn install -fn -Drat.skip=true -Drat.ignoreErrors=true -Drat.numUnapprovedLicenses=10000 -Djacoco.skip=true -DfailIfNoTests=false'
else:
ans = 'mvn -pl :{} -am install -Drat.skip=true -Drat.ignoreErrors=true -Drat.numUnapprovedLicenses=10000 -Djacoco.skip=true -fn'.format(
os.path.basename(module))
# ans = 'mvn test surefire:test -DfailIfNoTests=false -Dmaven.test.failure.ignore=true -Dtest='
ans += ' -DfailIfNoTests=false'
if debug:
ans += ' -Dmaven.surefire.debug="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 -Xnoagent -Djava.compiler=NONE"'
if len(testcases) > 0:
ans += ' -Dtest='
for testclass in testclasses:
if not ans.endswith('='):
ans += ','
ans += testclass.mvn_name
# ans += ' -f ' + self.repo_dir
return ans
    # Returns mvn command string that compiles the given module
def generate_mvn_test_compile_cmd(self, module):
if module == self.repo_dir:
ans = 'mvn test-compile -fn -Drat.skip=true -Drat.ignoreErrors=true -Drat.numUnapprovedLicenses=10000'
else:
ans = 'mvn -pl :{} -am test-compile -fn -Drat.skip=true -Drat.ignoreErrors=true -Drat.numUnapprovedLicenses=10000'.format(
os.path.basename(module))
ans += ' -f ' + self.repo_dir
return ans
    # Returns mvn command string that cleans the given module
def generate_mvn_clean_cmd(self, module):
if module == self.repo_dir:
ans = 'mvn clean '
else:
ans = 'mvn -pl :{} -am clean -fn'.format(
os.path.basename(module))
ans += ' -f ' + self.repo_dir
return ans
# Returns mvn command string that prints evosuite help material
def generate_mvn_evosuite_help_cmd(self, module):
if module == self.repo_dir:
ans = 'mvn evosuite:help '
else:
ans = 'mvn -pl :{} -am mvn evosuite:help -fn'.format(
os.path.basename(module))
ans += ' -f ' + self.repo_dir
return ans
    # Adds tags to the pom. xquery is a string written in xpath (aka xquery) convention
    # Behaviour is unknown if the xquery doesn't refer to a single tag
def set_pom_tag(self, xquery, value, module='', create_if_not_exist=False):
pom = self.get_pom(module)
root = ET.parse(pom).getroot()
xmlns, _ = mvn.tag_uri_and_name(root)
if not xmlns == '':
tmp_tags_1 = xquery.split('/')
tmp_tags_2 = list(map(lambda t: self.add_xmlns_prefix(xmlns, t), tmp_tags_1))
tags = list(map(lambda t: self.clean_query_string(t), tmp_tags_2))
tag = self.get_tag(root, tags[1:], create_if_not_exist=create_if_not_exist)
tag.text = value
self.rewrite_pom(root=root, module=module)
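    # Illustrative sketch of an xquery (mirrors the calls in setup_tests_generator below;
    # the version value is just an example):
    #
    #     repo.set_pom_tag("./build/plugins/plugin[artifactId = 'maven-surefire-plugin']/version",
    #                      value='2.17', create_if_not_exist=True)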
# Gets the tag specified in the xquery
def get_pom_tag(self, xquery, module=''):
pom = self.get_pom(module)
root = ET.parse(pom).getroot()
xmlns, _ = mvn.tag_uri_and_name(root)
if not xmlns == '':
tmp_tags_1 = xquery.split('/')
tmp_tags_2 = list(map(lambda t: self.add_xmlns_prefix(xmlns, t), tmp_tags_1))
tags = list(map(lambda t: self.clean_query_string(t), tmp_tags_2))
return self.get_tag(root, tags[1:])
# Recursively add element to tag
def get_tag(self, root_tag, subtags_path_array, create_if_not_exist=False):
if len(subtags_path_array) == 0:
return root_tag
next_tag_list = root_tag.findall(subtags_path_array[0])
if len(next_tag_list) == 0:
if create_if_not_exist:
condition = ''
tag_and_cond = subtags_path_array[0].replace(']', '').split('[')
tag_name = tag_and_cond[0]
if len(tag_and_cond) > 1:
condition = tag_and_cond[1]
new_tag = ET.SubElement(root_tag, tag_name)
if not condition == '':
[elem_name, val] = condition.split('=')
new_tag_attr = ET.SubElement(new_tag, elem_name)
new_tag_attr.text = val.strip('\'')
return self.get_tag(root_tag=new_tag, subtags_path_array=subtags_path_array[1:],
create_if_not_exist=create_if_not_exist)
else:
return None
if len(next_tag_list) > 1:
return None
next_tag = next_tag_list[0]
return self.get_tag(root_tag=next_tag, subtags_path_array=subtags_path_array[1:],
create_if_not_exist=create_if_not_exist)
def rewrite_pom(self, root, module=''):
pom = os.path.join(module, 'pom.xml')
rough_string = ET.tostring(root, 'utf-8')
reparsed = parseString(rough_string).toprettyxml().replace('</ns0:', '</').replace('<ns0:', '<')
os.remove(pom)
with open(pom, 'w+') as f:
tmp_str = reparsed
copy_tmp_str = ''
for char in tmp_str[::]:
if 125 < ord(char):
copy_tmp_str += 'X'
else:
copy_tmp_str += char
lines = list(filter(lambda line: len(line) > 0, copy_tmp_str.split('\n')))
for line in lines:
if not (all(c == ' ' for c in line) or all(c == '\t' for c in line)):
f.write(line + '\n')
def observe_tests(self):
from junitparser import JUnitXml, junitparser
outcomes = {}
for report in self.get_surefire_files():
try:
for case in JUnitXml.fromfile(report):
test = TestResult(case)
outcomes[test.full_name] = test
except Exception as e:
pass
return outcomes
def get_surefire_files(self):
SURFIRE_DIR_NAME = 'surefire-reports'
surefire_files = []
for root, _, files in os.walk(self.repo_dir):
for name in files:
if name.endswith('.xml') and os.path.basename(root) == SURFIRE_DIR_NAME:
surefire_files.append(os.path.join(root, name))
return surefire_files
    # Returns a dictionary that maps each testcase string to its trace strings
def get_traces(self, testcase_name=''):
ans = {}
debugger_tests_dir = os.path.relpath(os.path.join(self.repo_dir, r'../../DebuggerTests'))
if not os.path.isdir(debugger_tests_dir):
return ans
for filename in os.listdir(debugger_tests_dir):
if (filename.startswith('Trace_') or filename.endswith(".txt")) and testcase_name.replace('#',
'@') in filename:
with open(os.path.join(debugger_tests_dir, filename), 'r') as file:
key = filename.replace('.txt', '')
ans[key] = []
tmp = file.readlines()
for trace in tmp:
function_name = trace.replace('@', '#').replace('\n', '').split(' ')[-1]
if not function_name in ans[key]:
ans[key].append(str(function_name))
return ans
    # Returns the list of traces of the given testcase
def get_trace(self, testcase_name):
ans = []
dict = self.get_traces(testcase_name=testcase_name)
if not len(dict) == 1:
return ans
ans = dict[dict.keys()[0]]
return ans
# Returns the pom path associated with the given module
def get_pom(self, module):
if module == '':
module = self.repo_dir
pom_singelton = list(
filter(lambda f: f == 'pom.xml', os.listdir(module))
)
if not len(pom_singelton) == 1:
return ''
else:
return os.path.join(module, pom_singelton[0])
# Adds the xmlns prefix to the tag
def add_xmlns_prefix(self, xmlns, tag):
prefix = '{' + xmlns + '}'
with_prefix = ''
if tag == '.':
return tag
if tag.startswith(prefix):
with_prefix = tag
else:
with_prefix = prefix + tag
if with_prefix.find('[') < with_prefix.find(']'):
[tag_name, condition] = with_prefix.split('[')
condition = condition.replace(']', '')
[elem_name, val] = condition.split('=')
elem_with_prefix = self.add_xmlns_prefix(xmlns, elem_name)
with_prefix = tag_name + '[' + elem_with_prefix + '=' + val + ']'
return with_prefix
# Removes redundant chars from the given query to validate it
def clean_query_string(self, xquery):
ans = xquery
while ' = ' in ans or ' =' in ans or '= ' in ans:
ans = ans.replace(' = ', '=')
ans = ans.replace(' =', '=')
ans = ans.replace('= ', '=')
return ans
# Returns true if self has tests generator setup
def tests_generator_setup(self, module):
mvn_help_cmd = self.generate_mvn_evosuite_help_cmd(module)
EVOUSUITE_CONFIGURED_INDICATION = 'evosuite:generate'
with os.popen(mvn_help_cmd) as proc:
tmp_file_path = 'tmp_file.txt'
with open(tmp_file_path, "w+") as tmp_file:
mvn.duplicate_stdout(proc, tmp_file)
with open(tmp_file_path, "r") as tmp_file:
mvn.duplicate_stdout(proc, tmp_file)
build_report = tmp_file.readlines()
return any(list(map(lambda l: EVOUSUITE_CONFIGURED_INDICATION in l, build_report)))
def setup_tests_generator(self, module):
evousuite_version_property_xquery = '/'.join(['.','properties','evosuiteVersion'])
self.set_pom_tag(xquery=evousuite_version_property_xquery, create_if_not_exist=True, module=module,
value=self.DEFAULT_ES_VERSION)
self.add_plugin(artifactId='evosuite-maven-plugin', groupId='org.evosuite.plugins',
version='${evosuiteVersion}', module=module)
self.add_plugin(artifactId='maven-surefire-plugin', groupId='org.apache.maven.plugins',
version=self.DEFAULT_SUREFIRE_VERSION, module=module)
self.add_dependency(artifactId='evosuite-standalone-runtime', groupId='org.evosuite',
version='${evosuiteVersion}', module=module)
self.add_dependency(artifactId='junit', groupId='junit',
version=self.DEFAULT_JUNIT_VERSION, module=module)
evousuite_xpath = r"./build/plugins/plugin[artifactId = 'evosuite-maven-plugin']"
surefire_xpath = r"./build/plugins/plugin[artifactId = 'maven-surefire-plugin']"
execution_xpath = "executions/execution"
prepare_goal_xquery = '/'.join([evousuite_xpath, execution_xpath, "goals/goal"])
phase_xquery = '/'.join([evousuite_xpath, execution_xpath, "phase"])
listener_name_xquery = '/'.join([surefire_xpath, 'configuration', 'properties', 'property', 'name'])
listener_value_xquery = '/'.join([surefire_xpath, 'configuration', 'properties', 'property', 'value'])
self.set_pom_tag(xquery=prepare_goal_xquery, create_if_not_exist=True, module=module, value='prepare')
self.set_pom_tag(xquery=phase_xquery, create_if_not_exist=True, module=module, value='process-test-classes')
self.set_pom_tag(xquery=listener_name_xquery, create_if_not_exist=True, module=module, value='listener')
self.set_pom_tag(xquery=listener_value_xquery, create_if_not_exist=True, module=module,
value='org.evosuite.runtime.InitializingListener')
def add_plugin(self, artifactId, groupId, version, module):
plugin_xpath = r"./build/plugins/plugin[artifactId = '{}']".format(artifactId)
set_groupId_xquery = '/'.join([plugin_xpath, "groupId"])
set_version_xquery = '/'.join([plugin_xpath, "version"])
self.set_pom_tag(xquery=set_groupId_xquery, create_if_not_exist=True, module=module, value=groupId)
self.set_pom_tag(xquery=set_version_xquery, create_if_not_exist=True, module=module, value=version)
def add_dependency(self, artifactId, groupId, version, module):
dependency_xpath = r"./dependencies/dependency[artifactId = '{}']".format(artifactId)
set_groupId_xquery = '/'.join([dependency_xpath, "groupId"])
set_version_xquery = '/'.join([dependency_xpath, "version"])
self.set_pom_tag(xquery=set_groupId_xquery, create_if_not_exist=True, module=module, value=groupId)
self.set_pom_tag(xquery=set_version_xquery, create_if_not_exist=True, module=module, value=version)
if __name__ == "__main__":
# repo = Repo(r"C:\amirelm\projects_minors\JEXL\version_to_test_trace\repo")
# obs = repo.observe_tests()
# pass
# traces = JcovParser(r"C:\temp\traces").parse()
import time
start = time.time()
print "start time:", start
repo = Repo(r"C:\Temp\tika")
repo.run_under_jcov(r"C:\temp\traces", False, instrument_only_methods=True)
print "end time:", time.time() - start | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/Repo.py | Repo.py |
import copy
import os
import pickle
import shutil
from enum import Enum
import csv
import traceback
from sfl_diagnoser.Diagnoser import diagnoserUtils
class Bug(object):
def __init__(self, issue_key, commit_hexsha, parent_hexsha, fixed_testcase, bugged_testcase, type, valid, desc, traces = [], bugged_components = []):
self._issue_key = issue_key
self._commit_hexsha = commit_hexsha
self._parent_hexsha = parent_hexsha
self._fixed_testcase = fixed_testcase
self._bugged_testcase = bugged_testcase
self._module = os.path.basename(bugged_testcase.module)
self._type = type
self._desc = desc
self._valid = valid
self._traces = traces
self._bugged_components = bugged_components
self._has_annotations = 'Test' in list(map(lambda a: a.name, self._bugged_testcase.method.annotations))
@property
def issue(self):
return self._issue_key
@property
def commit(self):
return self._commit_hexsha
@property
def parent(self):
return self._parent_hexsha
@property
def bugged_testcase(self):
return self._bugged_testcase
@property
def fixed_testcase(self):
        return self._fixed_testcase
@property
def traces(self):
return self._traces
@property
def bugged_components(self):
return self._bugged_components
@property
def desctiption(self):
return self._desc
@property
def type(self):
return self._type
@property
def valid(self):
return self._valid
@property
def has_test_annotation(self):
return self._has_annotations
@property
def module(self):
return self._module
def __str__(self):
return 'type: ' + self.type.value + ' ,issue: ' + self.issue + ' ,commit: ' + self._commit_hexsha+ ' ,parent: ' + self.parent+ ' ,test: ' + self.bugged_testcase.mvn_name + ' description: ' + self._desc
class Bug_data_handler(object):
def __init__(self, path):
self._path = path
self._valid_bugs_csv_handler = Bug_csv_report_handler(os.path.join(self._path, 'valid_bugs.csv'))
self._invalid_bugs_csv_handler = Bug_csv_report_handler(os.path.join(self._path, 'invalid_bugs.csv'))
self._time_csv_handler = Time_csv_report_handler(os.path.join(self._path, 'times.csv'))
@property
def path(self):
return self._path
# Adds bug the data
def add_bug(self, bug):
if bug.valid:
self._valid_bugs_csv_handler.add_bug(bug)
else:
self._invalid_bugs_csv_handler.add_bug(bug)
self._store_bug(bug)
    # Adds a row to the time table
def add_time(self, issue_key, commit_hexsha, module, time, desctiption = ''):
self._time_csv_handler.add_row(issue_key, commit_hexsha, module, time, desctiption)
# Stores bug in it's direcrtory
def _store_bug(self,bug):
path_to_bug_testclass =self.get_bug_testclass_path(bug)
if not os.path.exists(path_to_bug_testclass):
os.makedirs(path_to_bug_testclass)
bug_path =self.get_bug_path(bug)
with open(bug_path, 'wb') as bug_file:
pickle.dump(bug, bug_file , protocol=2)
if len(bug.traces) > 0 and len(bug.bugged_components) > 0:
try:
matrix_path = os.path.join(path_to_bug_testclass, 'Matrix_'+bug.bugged_testcase.method.name+'.txt')
diagnoserUtils.write_planning_file(
out_path = matrix_path,
bugs = bug.bugged_components,
tests_details = [(bug.bugged_testcase.mvn_name, bug.traces, int(bug.bugged_testcase.passed))]
)
except Exception as e:
raise BugError(msg='Failed to create the matrix. Error:\n'+e.msg+'\n'+traceback.format_exc())
# Adds bugs to the csv file
def add_bugs(self, bugs):
self._valid_bugs_csv_handler.add_bugs(list(filter(lambda b: b.valid, bugs)))
self._invalid_bugs_csv_handler.add_bugs(list(filter(lambda b: not b.valid, bugs)))
for bug in bugs:
try:
self._store_bug(bug)
except BugError as e:
raise e
# Attach reports to the testclasses directories
def attach_reports(self, issue, commit, testcases):
testclasses = []
for testcase in testcases:
if not testcase.parent in testclasses:
testclasses.append(testcase.parent)
for testclass in testclasses:
testclass_path = self.get_testclass_path(issue.key,commit.hexsha, testclass.id)
report_copy_path = os.path.join(testclass_path, os.path.basename(testclass.get_report_path()))
os.system(
'copy ' + testclass.get_report_path().replace('/', '\\') + ' ' + report_copy_path.replace('/', '\\'))
# Gets the path to the bug's testclass directory
def get_bug_testclass_path(self, bug):
return os.path.join(self.path, '/'.join([bug.issue, bug.commit, bug.bugged_testcase.parent.id]))
# Gets the path to the directory of the testclass in the given commit and issue
def get_testclass_path(self, issue_key, commit_hexsha, testclass_id):
return os.path.join(self.path,'/'.join([issue_key,commit_hexsha, testclass_id]))
# Gets the path to the directory of the testclass in the given commit and issue
def get_bug_path(self, bug):
return os.path.join(self.get_bug_testclass_path(bug), bug.bugged_testcase.method.name + '.pickle')
    # Sets up directories for bug results
def set_up_bug_dir(self, issue, commit, testclasses):
ans = {}
path_to_bug_dir = os.path.join(self.path, issue.key)
if not os.path.isdir(path_to_bug_dir):
os.makedirs(path_to_bug_dir)
path_to_bug_dir = os.path.join(path_to_bug_dir, commit.hexsha)
if not os.path.isdir(path_to_bug_dir):
os.makedirs(path_to_bug_dir)
for testclass in testclasses:
path_to_testclass_dir = os.path.join(path_to_bug_dir, testclass.id)
if not os.path.isdir(path_to_testclass_dir):
os.makedirs(path_to_testclass_dir)
ans[testclass.id] = path_to_testclass_dir
return ans
    # Gets all the data from db_path
def fetch_all_data(self, db_path):
copytree(db_path,self.path)
def fetch_issue_data(self, db_path, issue):
copytree(os.path.join(db_path, issue), os.path.join(self.path, issue))
# Gets all the bugs in issue_key in fixed commit_hexsha
def get_bugs(self, issue_key, commit_hexsha):
ans = []
issue_dir = os.path.join(self.path, issue_key)
commit_dir = os.path.join(issue_dir, commit_hexsha)
if not os.path.isdir(commit_dir):
return ans
for filename in os.listdir(commit_dir):
full_file_path = os.path.join(commit_dir, filename)
if os.path.isdir(full_file_path):
ans+= self.get_testclass_bugs(full_file_path)
return ans
    # Returns all the bugs in the testclass path
def get_testclass_bugs(self, testclass_path):
ans = []
for filename in os.listdir(testclass_path):
if filename.endswith(".pickle"):
full_path = os.path.join(testclass_path, filename)
with open(full_path, 'rb') as bug_file:
ans.append(pickle.load(bug_file))
return ans
    # Gets the patch that applies the bug
def get_patch(self, bug):
testclass_path = self.get_testclass_path(bug.issue, bug.commit, bug.bugged_testcase.parent.id)
for filename in os.listdir(testclass_path):
if filename.endswith(".patch"):
return os.path.join(testclass_path, filename)
    # Gets the valid bugs tuples
def get_valid_bugs(self):
ans = []
with open(self._valid_bugs_csv_handler.path, 'r') as f:
reader = csv.reader(f)
ans = list(reader)
return ans
    # Gets the invalid bugs tuples
def get_invalid_bugs(self):
ans = []
with open(self._invalid_bugs_csv_handler.path, 'r') as f:
reader = csv.reader(f)
ans = list(reader)
return ans
    # Gets the time tuples
def get_times(self):
ans = []
with open(self._time_csv_handler.path, 'r') as f:
reader = csv.reader(f)
ans = list(reader)
return ans
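# Illustrative usage sketch (the path, issue key and commit hash below are
# hypothetical placeholders):
#
#     handler = Bug_data_handler(r'C:\temp\bugs_db')
#     handler.add_bug(bug)                            # writes the csv row and pickles the bug
#     bugs = handler.get_bugs('TIKA-123', 'a1b2c3d')  # reloads the bugs stored for that fix commit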
class Bug_csv_report_handler(object):
def __init__(self, path):
self._writer = None
self._path = path
self._fieldnames = ['valid','type','issue','module','commit','parent', 'testcase', 'has_test_annotation','description', 'traces', 'bugged_components']
if not os.path.exists(path):
with open(self._path, 'w+') as csv_output:
writer = csv.DictWriter(csv_output, fieldnames=self._fieldnames,lineterminator='\n')
writer.writeheader()
#Adds bug to the csv file
def add_bug(self, bug):
with open(self._path, 'a') as csv_output:
writer = csv.DictWriter(csv_output, fieldnames=self._fieldnames, lineterminator='\n')
writer.writerow(self.generate_csv_tupple(bug))
# Adds bugs to the csv file
def add_bugs(self, bugs):
with open(self._path, 'a') as csv_output:
writer = csv.DictWriter(csv_output, fieldnames=self._fieldnames, lineterminator='\n')
for bug in bugs:
writer.writerow(self.generate_csv_tupple(bug))
    # Generates the csv bug tuple
def generate_csv_tupple(self, bug):
return {'valid': bug.valid,
'type': bug.type.value,
'issue': bug.issue,
'module': bug.module,
'commit': bug.commit,
'parent': bug.parent,
'testcase': bug.bugged_testcase.mvn_name,
'has_test_annotation': bug.has_test_annotation,
'description': bug.desctiption,
'traces': bug.traces,
'bugged_components': bug.bugged_components
}
@property
def path(self):
return self._path
class Time_csv_report_handler(object):
def __init__(self, path):
self._writer = None
self._path = path
self._fieldnames = ['issue', 'commit','module', 'time', 'description']
if not os.path.exists(path):
with open(self._path, 'w+') as csv_output:
writer = csv.DictWriter(csv_output, fieldnames=self._fieldnames, lineterminator='\n')
writer.writeheader()
    # Adds a time row to the csv file
def add_row(self, issue_key, commit_hexsha, module, time, description):
with open(self._path, 'a') as csv_output:
writer = csv.DictWriter(csv_output, fieldnames=self._fieldnames, lineterminator='\n')
writer.writerow(self.generate_csv_tupple(issue_key, commit_hexsha, module, time, description))
    # Generates the csv time tuple
def generate_csv_tupple(self, issue_key, commit_hexsha, module, time, description):
return {
'issue': issue_key,
'commit': commit_hexsha,
'module': module,
'time': time,
'description': description
}
@property
def path(self):
return self._path
class BugError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
invalid_comp_error_desc = 'testcase generated compilation error when patched'
invalid_rt_error_desc = 'testcase generated runtime error when tested'
invalid_passed_desc = 'testcase passed in parent'
invalid_not_fixed_failed_desc = 'testcase failed in commit'
invalid_not_fixed__error_desc = 'testcase generated runtime error in commit'
class Bug_type(Enum):
DELTA = "Delta"
DELTA_2 = "Delta^2"
DELTA_3 = "Delta^3"
REGRESSION = "Regression"
def __str__(self):
return self.value
def __repr__(self):
return self.value
def create_bug(issue, commit, parent, testcase, parent_testcase, type, traces, bugged_components):
if testcase.passed and parent_testcase.failed:
return Bug(issue_key=issue.key, commit_hexsha=commit.hexsha, parent_hexsha=parent.hexsha,
fixed_testcase=testcase,bugged_testcase=parent_testcase, type=type, valid=True,desc='',
traces=traces, bugged_components=bugged_components)
elif testcase.passed and parent_testcase.has_error:
return Bug(issue_key=issue.key, commit_hexsha=commit.hexsha, parent_hexsha=parent.hexsha,
fixed_testcase=testcase, bugged_testcase=parent_testcase,
type=type,valid=False,desc=invalid_rt_error_desc + ' ' + parent_testcase.get_error(),
traces = traces, bugged_components = bugged_components)
elif testcase.passed and parent_testcase.passed:
return Bug(issue_key=issue.key, commit_hexsha=commit.hexsha, parent_hexsha=parent.hexsha,
fixed_testcase=testcase, bugged_testcase=parent_testcase,
type=type,valid=False, desc=invalid_passed_desc,traces=traces, bugged_components=bugged_components)
elif testcase.failed:
return Bug(issue_key=issue.key, commit_hexsha=commit.hexsha, parent_hexsha=parent.hexsha,
fixed_testcase=testcase, bugged_testcase=parent_testcase,
type=type,valid=False, desc=invalid_not_fixed_failed_desc,traces=traces, bugged_components=bugged_components)
elif testcase.has_error:
return Bug(issue_key=issue.key, commit_hexsha=commit.hexsha, parent_hexsha=parent.hexsha,
fixed_testcase=testcase, bugged_testcase=parent_testcase,
type=type,valid=False,desc=invalid_not_fixed__error_desc+' '+testcase.get_error(),traces=traces, bugged_components=bugged_components)
else:
assert 0==1
# copy directory from stackoverflow
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s) and not os.path.isdir(d):
shutil.copytree(s, d, symlinks, ignore)
elif os.path.isfile(s) and not os.path.isfile(d):
shutil.copy2(s, d)
# Returns the type of the bug
def determine_type(testcase, delta_testcases):
if testcase in delta_testcases:
return Bug_type.DELTA
else:
return Bug_type.REGRESSION | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/bug.py | bug.py |
import xml.etree.cElementTree as et
et.register_namespace('', "http://maven.apache.org/POM/4.0.0")
et.register_namespace('xsi', "http://www.w3.org/2001/XMLSchema-instance")
def is_surefire_plugin(plugin):
return filter(lambda x: x.text == PomPlugin.SUREFIRE_ARTIFACT_ID, Pom.get_children_by_name(plugin, PomPlugin.ARTIFACT_ID_NAME))
def is_junit_plugin(plugin):
return filter(lambda x: x.text == PomPlugin.JUNIT_ARTIFACT_ID, Pom.get_children_by_name(plugin, PomPlugin.ARTIFACT_ID_NAME))
class PomPlugin(object):
PLUGINS_PATH = ['build', 'plugins', 'plugin']
SUREFIRE_ARTIFACT_ID = "maven-surefire-plugin"
JUNIT_ARTIFACT_ID = "junit"
ARTIFACT_ID_NAME = "artifactId"
PLUGINS = {"maven-surefire-plugin": is_surefire_plugin}
@staticmethod
def get_plugins(pom):
return pom.get_elements_by_path(PomPlugin.PLUGINS_PATH)
@staticmethod
def get_plugin_by_name(pom, plugin_name):
assert plugin_name in PomPlugin.PLUGINS
return filter(PomPlugin.PLUGINS[plugin_name], PomPlugin.get_plugins(pom))
class PomValue(object):
def __init__(self, plugin_name, path_to_create, value, should_append=True):
self.plugin_name = plugin_name
self.path_to_create = path_to_create
self.value = value
self.should_append = should_append
def is_plugin(self):
return self.plugin_name
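# Hedged usage sketch (illustration only): a PomValue describing an <argLine>
# entry to append under the surefire plugin's <configuration>, applied through
# the Pom wrapper defined below. The pom path and JVM flag are hypothetical
# placeholders.
def _example_append_arg_line(pom_path='pom.xml'):
    pom_value = PomValue("maven-surefire-plugin", ["configuration", "argLine"], "-Xmx2048m")
    Pom(pom_path).add_pom_value(pom_value)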
class Pom(object):
def __init__(self, pom_path):
self.pom_path = pom_path
self.element_tree = et.parse(self.pom_path)
self.set_junit_version()
@staticmethod
def get_children_by_name(element, name):
return filter(lambda e: e.tag.endswith(name), element.getchildren())
@staticmethod
def get_or_create_child(element, name):
child = Pom.get_children_by_name(element, name)
if len(child) == 0:
return et.SubElement(element, name)
else:
return child[0]
@staticmethod
def get_or_create_by_path(element, path):
for name in path:
element = Pom.get_or_create_child(element, name)
return element
def get_elements_by_path(self, path):
elements = [self.element_tree.getroot()]
for name in path:
elements = reduce(list.__add__, map(lambda elem: Pom.get_children_by_name(elem, name), elements), [])
return elements
def add_pom_value(self, pom_value):
plugins_path = PomPlugin.get_plugin_by_name(self, pom_value.plugin_name)
for plugin_path in plugins_path:
created_element = Pom.get_or_create_by_path(plugin_path, pom_value.path_to_create)
element_text = ''
if pom_value.should_append and created_element.text:
element_text = created_element.text + ' '
element_text += pom_value.value
created_element.text = element_text
self.save()
def set_junit_version(self, version='4.11'):
junit_dependencies = filter(is_junit_plugin, self.get_elements_by_path(['dependencies', 'dependency']))
for dependency in junit_dependencies:
created_element = Pom.get_or_create_by_path(dependency, ['version'])
created_element.text = version
self.save()
def save(self):
self.element_tree.write(self.pom_path, xml_declaration=True) | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/pom_file.py | pom_file.py |
import os
import xml.etree.ElementTree as ET
import re
import javalang
class TestClass(object):
def __init__(self, file_path):
self._path = os.path.realpath(file_path)
self._module = self.find_module(self._path)
self._mvn_name = self.generate_mvn_name()
self._testcases = []
self._report = None
self._id = '#'.join([os.path.basename(self.module), self.mvn_name])
with open(self._path, 'r') as src_file:
try:
self._tree = javalang.parse.parse(src_file.read())
except UnicodeDecodeError as e:
raise TestParserException('Java file parsing problem:'+'\n'+str(e))
class_decls = [class_dec for _, class_dec in self.tree.filter(javalang.tree.ClassDeclaration)]
for class_decl in class_decls:
for method in class_decl.methods:
if self.is_valid_testcase(method):
self._testcases.append(TestCase(method, class_decl, self))
@property
def mvn_name(self):
return self._mvn_name
@property
def src_path(self):
return self._path
@property
def testcases(self):
return self._testcases
@property
def module(self):
return self._module
@property
def report(self):
return self._report
@report.setter
def report(self, report):
self._report = report
@property
def tree(self):
return self._tree
@property
def id(self):
return self._id
def parse_src_path(self):
ans = self.module
ans += '\\src\\test\\java'
        packages = self.mvn_name.split('.')
for p in packages:
ans += '\\' + p
return ans + '.java'
def get_report_path(self):
return self.module + '\\target\\surefire-reports\\' + 'TEST-' + self.mvn_name + '.xml'
def attach_report_to_testcase(self, testcase):
try:
testcase.report = self.report.get_testcase_report(testcase.mvn_name)
except TestParserException as e:
self.report = None
raise e
    # Looks for a report and, if one is found, attaches it to self and all its testcases
def look_for_report(self):
try:
self.report = TestClassReport(self.get_report_path(), self.module)
for t in self.testcases:
self.attach_report_to_testcase(t)
except TestParserException:
pass
def clear_report(self):
self.report = None
for t in self.testcases:
t.clear_report()
def find_module(self, file_path):
parent_dir = os.path.abspath(os.path.join(file_path, os.pardir))
is_root = False
while not is_root:
if os.path.isfile(parent_dir + '//pom.xml'):
return parent_dir
else:
tmp = os.path.abspath(os.path.join(parent_dir, os.pardir))
is_root = tmp == parent_dir
parent_dir = tmp
raise TestParserException(file_path + ' is not part of a maven module')
def is_valid_testcase(self, method):
return method.name.lower() != 'setup' and method.name.lower() != 'teardown' and\
len(method.parameters)==0 and method.return_type==None
def generate_mvn_name(self):
relpath = os.path.relpath(self.src_path, self.module + '\\src\\test\\java').replace('.java', '')
if relpath.startswith('..\\'):
relpath = relpath[3:]
return relpath.replace('\\', '.')
def __repr__(self):
return str(self.src_path)
def __eq__(self, other):
if not isinstance(other, TestClass):
return False
else:
return self.id == other.id
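# Hedged usage sketch (illustration only): parsing a JUnit source file into a
# TestClass and listing its testcases. The path below is a hypothetical
# placeholder for a test file somewhere under a maven module's src\test\java.
def _example_parse_test_class(path=r'MavenProj\sub_mod_1\src\test\java\p_1\AmitTest.java'):
    test_class = TestClass(path)
    for testcase in test_class.testcases:
        print testcase.mvn_name, testcase.get_lines_range()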
class TestCase(object):
def __init__(self, method, class_decl, parent):
self._parent = parent
self._method = method
self._mvn_name = self.parent.mvn_name + '#' + self.method.name
self.class_decl = class_decl
self._id = self.generate_id()
self._report = None
self._start_line = self.method.position[0]
self._end_line = self.find_end_line(self._start_line)
assert self._end_line != -1
@property
def mvn_name(self):
return self._mvn_name
@property
def src_path(self):
return self.parent.src_path
@property
def id(self):
return self._id
@property
def module(self):
return self.parent.module
@property
def method(self):
return self._method
@property
def parent(self):
return self._parent
@property
def report(self):
return self._report
@report.setter
def report(self, report):
self._report = report
@property
def start_line(self):
return self._start_line
@property
def end_line(self):
return self._end_line
@property
def passed(self):
return self.report.passed
@property
def failed(self):
return self.report.failed
@property
def has_error(self):
return self.report.has_error
def clear_report(self):
self.report = None
def get_error(self):
return self.report.get_error()
    def has_the_same_code_as(self, other):
        if len(self.method.body) == len(other.method.body):
            i = 0
            while i < len(self.method.body):
                if not self.method.body[i] == other.method.body[i]:
                    return False
                i += 1
            return True
        else:
            return False
def generate_id(self):
ret_type = str(self.method.return_type)
if len(self.method.parameters) == 0:
parameters = '()'
else:
parameters = '(' + self.method.parameters[0].type.name
if len(self.method.parameters) > 1:
param_iter = iter(self.method.parameters)
next(param_iter)
for param in param_iter:
parameters += ', ' + param.type.name
parameters += ')'
return self.parent.src_path + '#' + self.class_decl.name + '#' + ret_type + '_' + self.method.name + parameters
def get_lines_range(self):
lower_position = self.method.position[0]
for annotation in self.method.annotations:
if annotation.position[0] < lower_position:
lower_position = annotation.position[0]
return (lower_position, self.end_line)
def contains_line(self, line):
range = self.get_lines_range()
return range[0] <= line <= range[1]
def find_end_line(self, line_num):
brackets_stack = []
open_position = (-1, -1)
with open(self.src_path, 'r') as j_file:
lines = j_file.readlines()
i = 1
for line in lines:
if i < line_num:
i += 1
continue
j = 1
for letter in line:
if '{' == letter:
brackets_stack.append('{')
break
else:
j += 1
if len(brackets_stack) == 1:
open_position = (i, j)
break
i+=1
if open_position[0] == -1 or open_position[1] == -1:
return -1
i = 1
for line in lines:
if i < open_position[0]:
i += 1
continue
j = 1
for letter in line:
if i == open_position[0] and j <= open_position[1]:
j += 1
continue
if letter == '{':
brackets_stack.append('{')
if letter == '}':
brackets_stack.pop()
if len(brackets_stack) == 0:
return i
j += 1
i += 1
def __repr__(self):
return self.id
def __eq__(self, other):
if not isinstance(other, TestCase):
return False
else:
return self.id == other.id
class TestClassReport(object):
    def __init__(self, xml_doc_path, module_path):
if not os.path.isfile(xml_doc_path):
raise TestParserException('No such report file :' + xml_doc_path)
self.xml_path = xml_doc_path
self.success_testcases = []
self.failed_testcases = []
self._testcases = []
self._time = 0.0
self.maven_multiModuleProjectDirectory = ''
        self._module_path = module_path
tree = ET.parse(self.xml_path)
root = tree.getroot()
self._name = root.get('name')
self._src_file_path = self.parse_src_path()
for testcase in root.findall('testcase'):
m_test = TestCaseReport(testcase, self)
if m_test.passed:
self.success_testcases.append(m_test)
else:
self.failed_testcases.append(m_test)
self._testcases.append(m_test)
self._time += m_test.time
properties_root = root.find('properties')
properties = properties_root.findall('property')
for property in properties:
if property.get('name') == 'maven.multiModuleProjectDirectory':
self.maven_multiModuleProjectDirectory = property.get('value')
@property
def time(self):
return self._time
@property
def name(self):
return self._name
@property
def testcases(self):
return self._testcases
def passed(self):
return len(self.failed_testcases) == 0
@property
def module(self):
return self._module_path
@property
def src_path(self):
return self._src_file_path
    # Returns true if the given test name is this test or one of its testcases
def is_associated(self, test):
if test == 'test' or test == 'TEST' or test == 'Test':
return False
if test in self.name:
return True
for testcase in self.testcases:
if test in testcase.name:
return True
return False
def __repr__(self):
return str(self.name)
def parse_src_path(self):
test_name = os.path.basename(self.xml_path).replace('TEST-', '').replace('.java', '').replace('.xml', '')
test_name = test_name.replace('.', '\\')
test_name += '.java'
return self.module + '\\src\\test\\java\\' + test_name
def get_testcase_report(self, testcase_mvn_name):
        ans_singleton = list(filter(lambda t: testcase_mvn_name.endswith(t.name), self.testcases))
        if not len(ans_singleton) == 1:
            raise TestParserException(str(len(ans_singleton)) + ' possible testcase reports for ' + testcase_mvn_name)
        return ans_singleton[0]
class TestCaseReport(object):
def __init__(self, testcase, parent):
self._parent = parent
self.testcase_tag = testcase
self._name = self.parent.name + '#'+self.testcase_tag.get('name')
self._time = float(re.sub('[,]', '', self.testcase_tag.get('time')))
self._passed = False
self._failed = False
self._has_error = False
failure = self.testcase_tag.find('failure')
if not failure is None:
self._failed = True
self.error = self.testcase_tag.find('error')
if not self.error is None:
self._has_error = True
self._passed = not self._failed and not self._has_error
@property
def time(self):
return self._time
@property
def name(self):
return self._name
@property
def passed(self):
return self._passed
@property
def failed(self):
return self._failed
@property
def has_error(self):
return self._has_error
@property
def src_path(self):
return self.parent.src_path
@property
def module(self):
return self.parent.module
@property
def parent(self):
return self._parent
def get_error(self):
return self.error.text
def __repr__(self):
return str(self.name)
class TestParserException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg) | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/TestObjects.py | TestObjects.py |
import os
import glob
from junitparser import JUnitXml, junitparser
from junitparser.junitparser import Error, Failure
from subprocess import Popen
import sys
import javalang
import shutil
import xml.etree.ElementTree
import tempfile
from contextlib import contextmanager
SURFIRE_DIR_NAME = 'surefire-reports'
OBSERVE_PATH = r"c:\temp\observe"
class Test(object):
def __init__(self, junit_test):
self.junit_test = junit_test
self.classname = junit_test.classname
self.name = junit_test.name
self.full_name = "{classname}@{name}".format(classname=self.classname, name=self.name).lower()
result = 'pass'
if type(junit_test.result) is Error:
result = 'error'
if type(junit_test.result) is Failure:
result = 'failure'
self.outcome = result
def __repr__(self):
return "{full_name}: {outcome}".format(full_name=self.full_name, outcome=self.outcome)
def is_passed(self):
return self.outcome == 'pass'
def get_observation(self):
return 0 if self.is_passed() else 1
def as_dict(self):
        return {'_test_name': self.full_name, '_outcome': self.outcome}
class Trace(object):
def __init__(self, test_name, trace):
self.test_name = test_name
self.trace = map(lambda t: t.lower(), trace)
def files_trace(self):
return list(set(map(lambda x: x.split("@")[0].lower(), self.trace)))
def get_trace(self, trace_granularity):
if trace_granularity == 'methods':
return list(set(self.trace))
elif trace_granularity == 'files':
return self.files_trace()
assert False
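# Hedged example (illustration only): two method-level hits in the same class
# collapse to a single entry at 'files' granularity. The names are hypothetical.
def _example_trace_granularity():
    trace = Trace('pkg.FooTest@testBar', ['pkg.Foo@doWork', 'pkg.Foo@helper'])
    assert trace.get_trace('files') == ['pkg.foo']
    assert sorted(trace.get_trace('methods')) == ['pkg.foo@dowork', 'pkg.foo@helper']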
class Tracer(object):
def __init__(self):
self.traces = {}
@contextmanager
def trace(self):
yield
class JcovTracer(object):
pass
class AmirTracer(Tracer):
def __init__(self, git_path, tracer_path, copy_traces_to):
super(AmirTracer, self).__init__()
self.tracer_path = tracer_path
self.git_path = git_path
self.paths_file = tempfile.mktemp()
self.copy_traces_to = copy_traces_to
self.traces = {}
@contextmanager
def trace(self):
self.enable_tracer()
yield
self.collect_traces()
os.remove(self.paths_file)
def enable_tracer(self):
poms = []
for root, _, files in os.walk(self.git_path):
poms.extend(map(lambda name: os.path.join(root, name), filter(lambda name: name == "pom.xml", files)))
map(self.fix_pom_file, poms)
def fix_pom_file(self, pom_path):
xml.etree.ElementTree.register_namespace('', "http://maven.apache.org/POM/4.0.0")
xml.etree.ElementTree.register_namespace('xsi', "http://www.w3.org/2001/XMLSchema-instance")
def get_children_by_name(element, name):
return filter(lambda e: e.tag.endswith(name), element.getchildren())
def get_or_create_child(element, name):
child = get_children_by_name(element, name)
if len(child) == 0:
return xml.etree.ElementTree.SubElement(element, name)
else:
return child[0]
et = xml.etree.ElementTree.parse(pom_path)
surfire_plugins = filter(lambda plugin: filter(lambda x: x.text == "maven-surefire-plugin",
get_children_by_name(plugin, "artifactId")),
filter(lambda e: e.tag.endswith('plugin'), et.getroot().iter()))
trace_text = self.get_tracer_arg_line()
for plugin in surfire_plugins:
configuration = get_or_create_child(plugin, 'configuration')
argLine = get_or_create_child(configuration, 'argLine')
argLine.text = argLine.text + trace_text if argLine.text else trace_text
et.write(pom_path, xml_declaration=True)
def get_tracer_arg_line(self):
paths = [os.path.expandvars(r'%USERPROFILE%\.m2\repository'), self.git_path]
with open(self.paths_file, 'wb') as paths_file:
paths_file.write("\n".join(paths))
return ' -Xms8g -Xmx20048m -javaagent:{0}={1} '.format(self.tracer_path, self.paths_file)
def collect_traces(self):
traces_files = []
for root, dirs, _ in os.walk(os.path.abspath(os.path.join(self.git_path, "..\.."))):
traces_files.extend(map(lambda name: glob.glob(os.path.join(root, name, "TRACE_*.txt")), filter(lambda name: name == "DebuggerTests", dirs)))
for trace_file in reduce(list.__add__, traces_files, []):
dst = os.path.join(self.copy_traces_to, os.path.basename(trace_file))
if not os.path.exists(dst):
shutil.copyfile(trace_file, dst)
test_name = trace_file.split('\\Trace_')[1].split('_')[0].lower()
with open(trace_file) as f:
self.traces[test_name] = Trace(test_name, map(lambda line: line.strip().split()[2].strip(), f.readlines()))
class TestRunner(object):
def __init__(self, git_path, tracer=None):
self.git_path = git_path
if tracer is None:
tracer = Tracer()
self.tracer = tracer
self.observations = {}
def run(self):
with self.tracer.trace():
self.run_mvn()
self.observations = self.observe_tests()
def run_mvn(self):
os.system(r'mvn install -fn -f {0}'.format(self.git_path))
def observe_tests(self):
outcomes = {}
for report in self.get_surefire_files():
try:
for case in JUnitXml.fromfile(report):
test = Test(case)
outcomes[test.full_name] = test
except:
pass
return outcomes
def get_surefire_files(self):
surefire_files = []
for root, _, files in os.walk(self.git_path):
for name in files:
if name.endswith('.xml') and os.path.basename(root) == SURFIRE_DIR_NAME:
surefire_files.append(os.path.join(root, name))
return surefire_files
def get_tests(self):
return set(self.tracer.traces.keys()) & set(self.observations.keys())
def get_packages_tests(self):
packages_tests = {}
for test_name in self.get_tests():
spllited = test_name.split('@')[0].split('.')
for ind in range(len(spllited)):
packages_tests.setdefault('.'.join(spllited[:ind]), set()).add(test_name)
return packages_tests
def checkout_commit(commit_to_observe, git_path):
git_commit_path = os.path.join(OBSERVE_PATH, os.path.basename(git_path), commit_to_observe)
Popen(['git', 'clone', git_path, git_commit_path]).communicate()
Popen(['git', 'checkout', '-f', '{0}'.format(commit_to_observe)], cwd=git_commit_path).communicate()
return git_commit_path
if __name__ == "__main__":
import csv
assert len(sys.argv) == 5
_ , repo, matrix_path, prediction_path, tracer_path = sys.argv
for x in [repo, prediction_path, tracer_path]:
assert os.path.exists(x)
predictions = {}
with open(prediction_path) as f:
lines = list(csv.reader(f))[1:]
predictions = dict(map(lambda line: (line[0].replace(".java", "").replace(os.path.sep, ".").lower(), line[1]), lines))
tr = TestRunner(repo, AmirTracer(repo, tracer_path))
tr.run()
from sfl_diagnoser.Diagnoser.diagnoserUtils import write_planning_file
tests = set(tr.tracer.traces.keys()) & set(tr.observations.keys())
components_priors = {}
for component in set(reduce(list.__add__, map(lambda test_name: tr.tracer.traces[test_name].files_trace(), tests), [])):
for prediction in predictions:
if component in prediction:
components_priors[component] = predictions[prediction]
components = set(components_priors.keys())
tests_details = map(lambda test_name: (test_name, list(set(tr.tracer.traces[test_name].files_trace()) & components), tr.observations[test_name].get_observation()),
tests)
write_planning_file(matrix_path, [], filter(lambda test: len(test[1]) > 0, tests_details), priors=components_priors) | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/run_mvn.py | run_mvn.py |
import csv
import functools
import os
import re
import xml.etree.cElementTree as et
class PrimitiveTypes(object):
PRIMITIVES = {'Z': "boolean", 'V': "void", 'I': "int", 'J': "long", 'C': "char", 'B': "byte", 'D': "double",
'S': "short", 'F': "float"}
@staticmethod
def get_primitive_type(primitive):
return PrimitiveTypes.PRIMITIVES[primitive]
class Signature(object):
MATCHER = re.compile("\\(([^\\)]*)\\)(.*)")
def __init__(self, vmsig):
self.vmsig = vmsig
m = Signature.MATCHER.match(self.vmsig)
self.return_value = Signature.convert_vm_type(m.group(2))
self.args = Signature.get_args(m.group(1))
@staticmethod
def convert_vm_type(vm_type):
return Signature.get_type_name(vm_type.replace('/', '.'))
@staticmethod
def get_type_name(vm_type):
dims = 0
while vm_type[dims] == '[':
dims += 1
type = ''
if vm_type[dims] == 'L':
type = vm_type[dims + 1: len(vm_type) - 1]
else:
type = PrimitiveTypes.get_primitive_type(vm_type[dims])
return type + "[]" * dims
@staticmethod
def get_args(descr):
if descr == "":
return descr
pos = 0
last_pos = len(descr)
args = ''
dims = 0
while pos < last_pos:
ch = descr[pos]
if ch == 'L':
delimPos = descr.find(';', pos)
if delimPos == -1:
delimPos = last_pos
type = Signature.convert_vm_type(descr[pos: delimPos + 1])
pos = delimPos + 1
elif ch == '[':
dims += 1
pos += 1
continue
else:
type = PrimitiveTypes.get_primitive_type(ch)
pos += 1
args += type + "[]" * dims
dims = 0
if pos < last_pos:
args += ';'
return args
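# Hedged example (illustration only): decoding a JVM method descriptor with the
# Signature class above. "(Ljava/lang/String;I)V" describes a method taking a
# java.lang.String and an int and returning void.
def _example_signature():
    sig = Signature("(Ljava/lang/String;I)V")
    assert sig.return_value == "void"
    assert sig.args == "java.lang.String;int"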
class Trace(object):
def __init__(self, test_name, trace):
self.test_name = test_name
self.trace = map(lambda t: t.lower(), trace)
def files_trace(self):
return list(set(map(lambda x: ".".join((x.split("(")[0].split(".")[:-1])), self.trace)))
def get_trace(self, trace_granularity="Method"):
if trace_granularity == "Method":
return list(set(self.trace))
elif trace_granularity == "File":
return self.files_trace()
assert False, "granularity is {0}".format(trace_granularity)
class JcovParser(object):
CLOSER = "/>"
METH = "<meth"
METHENTER = "<meth"
CSV_HEADER = ["component", "hit_count"]
def __init__(self, xml_folder_dir, instrument_only_methods=True):
self.jcov_files = map(lambda name: os.path.join(xml_folder_dir, name),
filter(lambda name: name.endswith('.xml'), os.listdir(xml_folder_dir)))
self.instrument_only_methods = instrument_only_methods
self.ids = self._get_method_ids()
self.lines_to_read = self._get_methods_lines()
def parse(self):
traces = {}
for jcov_file in self.jcov_files:
test_name = os.path.splitext(os.path.basename(jcov_file))[0].lower()
traces[test_name] = self._parse_jcov_file(jcov_file, test_name)
return traces
def _parse_jcov_file(self, jcov_file, test_name):
counts = self._get_methenter_ids_counts(jcov_file)
return Trace(test_name, map(lambda id: self.ids[id], counts))
def _get_methenter_ids_counts(self, jcov_file):
counts = {}
for method in self._get_lines_by_inds(jcov_file):
data = dict(map(lambda val: val.split('='),
method[len(JcovParser.METHENTER) + 1:-len(JcovParser.CLOSER)].replace('"', "").split()))
if data.has_key('count') and data.has_key('id') and int(data.get('count')):
counts[data["id"]] = data.get('count')
return counts
def _get_lines_by_inds(self, file_path):
with open(file_path) as f:
enumerator_next_ind = 0
for ind in self.lines_to_read:
map(functools.partial(next, f), xrange(enumerator_next_ind, ind))
enumerator_next_ind = ind + 1
yield next(f).strip()
def _get_methods_lines(self):
method_prefix = JcovParser.METH
if not self.instrument_only_methods:
method_prefix = JcovParser.METHENTER
with open(self.jcov_files[0]) as f:
return map(lambda line: line[0],
filter(lambda line: method_prefix in line[1] and JcovParser.CLOSER in line[1],
enumerate(f.readlines())))
@staticmethod
def get_children_by_name(element, name):
return filter(lambda e: e.tag.endswith(name), element.getchildren())
@staticmethod
def get_elements_by_path(root, path):
elements = [([], root)]
for name in path:
elements = reduce(list.__add__,
map(lambda elem: map(lambda child: (elem[0] + [child], child),
JcovParser.get_children_by_name(elem[1], name)), elements), [])
return elements
def _get_method_ids(self):
root = et.parse(self.jcov_files[0]).getroot()
method_ids = {}
for method_path, method in JcovParser.get_elements_by_path(root, ['package', 'class', 'meth']):
package_name, class_name, method_name = map(lambda elem: elem.attrib['name'], method_path)
if method_name == '<init>':
method_name = class_name
method_name = ".".join([package_name, class_name, method_name]) + "({0})".format(Signature(method.attrib['vmsig']).args)
id = 0
if self.instrument_only_methods:
id = method.attrib['id']
else:
id = JcovParser.get_elements_by_path(method, ['bl', 'methenter'])[0][1].attrib['id']
method_ids[id] = method_name
return method_ids
if __name__ == "__main__":
traces = JcovParser(r"C:\Users\deanc\Desktop\workspaces\java\subjects\commons-math\dbguer\traces").parse()
print traces
pass | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/jcov_parser.py | jcov_parser.py |
import os
import shutil
import sys
import unittest
import Repo
import mvn
import xml.etree.ElementTree as ET
import TestObjects
orig_wd = os.getcwd()
class Test_mvnpy(unittest.TestCase):
def setUp(self):
os.chdir(orig_wd)
test_doc_1 = os.getcwd() + r'\static_files\TEST-org.apache.tika.cli.TikaCLIBatchCommandLineTest.xml'
test_doc_2 = os.getcwd() + r'\static_files\MavenProj\sub_mod_2\target\surefire-reports\TEST-p_1.AssafTest.xml'
self.test_report_1 = TestObjects.TestClassReport(test_doc_1, '')
try:
self.test_report_2 = TestObjects.TestClassReport(test_doc_2,
os.getcwd() + r'\static_files\MavenProj\sub_mod_2')
except TestObjects.TestParserException as e:
print("Unexpected state of the test driver. Caused excpetion:")
print (e.msg)
#resetEnvritonment()
self.test_1 = TestObjects.TestClass(
os.getcwd() + r'\static_files\MavenProj\sub_mod_2\src\test\java\NaimTest.java')
self.test_2 = TestObjects.TestClass(
os.getcwd() + r'\static_files\MavenProj\sub_mod_1\src\test\java\p_1\AmitTest.java')
self.test_3 = TestObjects.TestClass(
os.getcwd() + r'\static_files\tika_1\src\test\java\org\apache\tika\parser\AutoDetectParserTest.java')
self.test_4 = TestObjects.TestClass(
os.getcwd() + r'\static_files\tika_1\src\test\java\org\apache\tika\sax\AppendableAdaptorTest.java')
self.test_5 = TestObjects.TestClass(
os.getcwd() + r'\static_files\tika_1\src\test\java\org\apache\tika\sax _1\AppendableAdaptorTest.java')
self.testcase_1 = [t for t in self.test_3.testcases if t.id.endswith('None_testExcel()')][0]
self.testcase_2 = [t for t in self.test_4.testcases if t.id.endswith('None_testAppendChar()')][0]
self.testcase_3 = [t for t in self.test_5.testcases if t.id.endswith('None_testAppendChar()')][0]
self.testcase_4 = [t for t in self.test_5.testcases if t.id.endswith('None_testAppendString()')][0]
def tearDown(self):
pass
def test_get_path(self):
expected_name = os.getcwd() + r'\static_files\MavenProj\sub_mod_2\src\test\java\NaimTest.java'
self.assertEqual(self.test_1.src_path, expected_name)
def test_get_module(self):
expected_module_1 = os.getcwd() + r'\static_files\MavenProj\sub_mod_2'
expected_module_2 = os.getcwd() + r'\static_files\MavenProj\sub_mod_1'
self.assertEqual(self.test_1.module, expected_module_1,
str(self.test_1) + ' module should be ' + expected_module_1)
self.assertEqual(self.test_2.module, expected_module_2,
str(self.test_2) + ' module should be ' + expected_module_2)
def test_mvn_name(self):
expected_name = 'p_1.AmitTest'
expected_method_name = 'p_1.AmitTest#hoo'
self.assertEqual(self.test_2.mvn_name, expected_name)
self.assertTrue(expected_method_name in list(map(lambda m: m.mvn_name, self.test_2.testcases)))
def test_get_testcases(self):
expected_testcase_id = os.getcwd() + r'\static_files\MavenProj\sub_mod_1\src\test\java\p_1\AmitTest.java#AmitTest#None_hoo()'
self.assertTrue(expected_testcase_id in list(map(lambda tc: tc.id, self.test_2.testcases)))
        self.assertEqual(len(self.test_2.testcases), 5, "p_1.AmitTest should have 5 testcases")
def test_get_report_path(self):
expected_report_path = os.getcwd() + r'\static_files\MavenProj\sub_mod_1\target\surefire-reports\TEST-p_1.AmitTest.xml'
self.assertEqual(self.test_2.get_report_path(), expected_report_path)
def test_report_get_src_file_path(self):
expected_src_file_path = os.getcwd() + r'\static_files\MavenProj\sub_mod_2\src\test\java\p_1\AssafTest.java'
self.assertEqual(self.test_report_2.src_path, expected_src_file_path)
def test_report_get_time(self):
        testcases = self.test_report_1.testcases
        expected_time = 0.0
        for testcase in testcases:
            expected_time += testcase.time
self.assertEqual(self.test_report_1.time, expected_time)
def test_report_get_testcases(self):
expected_testcases_names = []
expected_testcases_names.append("testTwoDirsNoFlags")
expected_testcases_names.append("testBasicMappingOfArgs")
expected_testcases_names.append("testOneDirOneFileException")
expected_testcases_names.append("testTwoDirsVarious")
expected_testcases_names.append("testConfig")
expected_testcases_names.append("testJVMOpts")
for testcase in self.test_report_1.testcases:
if "testTwoDirsNoFlags" in testcase.name:
self.assertEqual(testcase.time, 0.071)
elif "testBasicMappingOfArgs" in testcase.name:
self.assertEqual(testcase.time, 0.007)
elif "testOneDirOneFileException" in testcase.name:
self.assertEqual(testcase.time, 0.007)
elif "testTwoDirsVarious" in testcase.name:
self.assertEqual(testcase.time, 0.006)
elif "testConfig" in testcase.name:
self.assertEqual(testcase.time, 0.006)
elif "testJVMOpts" in testcase.name:
self.assertEqual(testcase.time, 0.007)
else:
self.fail("Unexpected testcase name: " + testcase.name)
result_testcases_names = []
for testcase in self.test_report_1.testcases:
result_testcases_names.append(testcase.name)
for name in expected_testcases_names:
i = 0
for res_name in result_testcases_names:
if name in res_name:
continue
else:
i += 1
if i == len(result_testcases_names):
self.fail(name + ' not associated to ' + self.test_report_1.name)
def test_report_is_associated(self):
t_associated_name_1 = 'testTwoDirsNoFlags'
t_associated_name_2 = 'TikaCLIBatchCommandLineTest'
t_not_associated_name_1 = 'testHeyDirsNoFlags'
t_not_associated_name_2 = 'TikaBrotherCLIBatchCommandLineTest'
self.assertTrue(self.test_report_1.is_associated(t_associated_name_1))
self.assertTrue(self.test_report_1.is_associated(t_associated_name_2))
self.assertFalse(self.test_report_1.is_associated(t_not_associated_name_1))
self.assertFalse(self.test_report_1.is_associated(t_not_associated_name_2))
def test_star_line_end_line(self):
self.assertTrue(self.testcase_1.start_line == 130, 'result - start_line : '+str(self.testcase_1.start_line))
self.assertTrue(self.testcase_1.end_line == 132, 'result - end_line : '+str(self.testcase_1.end_line))
@unittest.skip("Not woking")
def test_has_the_same_code(self):
self.assertTrue(self.testcase_2.has_the_same_code_as(self.testcase_3))
self.assertFalse(self.testcase_2.has_the_same_code_as(self.testcase_4))
def test_change_surefire_ver_1(self):
module = os.path.join( os.getcwd(),r'static_files\tika')
repo = Repo.Repo(module)
curr_wd = os.getcwd()
os.chdir(module)
os.system('git checkout HEAD -f')
mvn_help_cmd = 'mvn help:describe -DgroupId=org.apache.maven.plugins -DartifactId=maven-surefire-plugin'
excpected_version = '2.21.0'
        poms = repo.get_all_pom_paths(module)
        repo.change_surefire_ver(excpected_version, module)
self.assertTrue(len(poms)>0)
for pom in poms:
print('#### checking '+pom+' ######')
if(os.path.normcase(os.path.join( os.getcwd(),r'tika-dotnet\pom.xml')) ==os.path.normcase(pom)):
print('#### passing ' + pom + ' ######')
continue
module_path = os.path.abspath(os.path.join(pom, os.pardir))
with os.popen(mvn_help_cmd+' -f '+module_path) as proc:
tmp_file_path = 'tmp_file.txt'
with open(tmp_file_path, "w+") as tmp_file:
duplicate_stdout(proc, tmp_file)
with open(tmp_file_path, "r") as tmp_file:
duplicate_stdout(proc, tmp_file)
build_report = tmp_file.readlines()
version_line_sing = list(filter(lambda l: l.startswith('Version: '),build_report))
assert len(version_line_sing) == 1
version_line = version_line_sing[0]
self.assertEqual(version_line.lstrip('Version: ').rstrip('\n'),excpected_version)
os.system('git checkout HEAD -f')
os.chdir(curr_wd)
def test_change_surefire_ver_2(self):
module = os.path.join(os.getcwd(),r'static_files\commons-math')
repo = Repo.Repo(module)
curr_wd = os.getcwd()
os.chdir(module)
os.system('git checkout 35414bc4f4ef03ef12e99c027398e5dc84682a9e -f')
mvn_help_cmd = 'mvn help:describe -DgroupId=org.apache.maven.plugins -DartifactId=maven-surefire-plugin'
excpected_version = '2.22.0'
poms = repo.get_all_pom_paths(module)
repo.change_surefire_ver(excpected_version, module)
self.assertTrue(len(poms)>0)
for pom in poms:
print('#### checking '+pom+' ######')
if(os.path.normcase(r'C:\Users\user\Code\Python\BugMiner\mvn_parsers\static_files\tika\tika-dotnet\pom.xml') ==os.path.normcase(pom)):
print('#### passing ' + pom + ' ######')
continue
module_path = os.path.abspath(os.path.join(pom, os.pardir))
with os.popen(mvn_help_cmd+' -f '+module_path) as proc:
tmp_file_path = 'tmp_file.txt'
with open(tmp_file_path, "w+") as tmp_file:
duplicate_stdout(proc, tmp_file)
with open(tmp_file_path, "r") as tmp_file:
duplicate_stdout(proc, tmp_file)
build_report = tmp_file.readlines()
version_line_sing = list(filter(lambda l: l.startswith('Version: '),build_report))
assert len(version_line_sing) == 1
version_line = version_line_sing[0]
self.assertEqual(version_line.lstrip('Version: ').rstrip('\n'),excpected_version)
os.chdir(curr_wd)
def test_change_surefire_ver_3(self):
module = os.path.join( os.getcwd(),r'static_files\tika')
repo = Repo.Repo(module)
curr_wd = os.getcwd()
os.chdir(module)
os.system('git checkout d363b828bc6e714aa5f4ffedfbd1d09e1880f9ee -f')
mvn_help_cmd = 'mvn help:describe -DgroupId=org.apache.maven.plugins -DartifactId=maven-surefire-plugin'
excpected_version = '2.22.0'
        poms = repo.get_all_pom_paths(module)
        repo.change_surefire_ver(excpected_version, module)
self.assertTrue(len(poms)>0)
for pom in poms:
print('#### checking '+pom+' ######')
if(os.path.normcase(os.path.join( os.getcwd(),r'tika-dotnet\pom.xml')) ==os.path.normcase(pom)):
print('#### passing ' + pom + ' ######')
continue
module_path = os.path.abspath(os.path.join(pom, os.pardir))
with os.popen(mvn_help_cmd+' -f '+module_path) as proc:
tmp_file_path = 'tmp_file.txt'
with open(tmp_file_path, "w+") as tmp_file:
duplicate_stdout(proc, tmp_file)
with open(tmp_file_path, "r") as tmp_file:
duplicate_stdout(proc, tmp_file)
build_report = tmp_file.readlines()
version_line_sing = list(filter(lambda l: l.startswith('Version: '),build_report))
assert len(version_line_sing) == 1
version_line = version_line_sing[0]
self.assertEqual(version_line.lstrip('Version: ').rstrip('\n'),excpected_version)
os.system('git checkout HEAD -f')
os.chdir(curr_wd)
def test_add_argline_to_surefire(self):
module = os.path.join( os.getcwd(),r'static_files\tika')
expected_content = '-javaagent:C:\\Users\\user\\Code\\JAVA\\MavenProj\\agent.jar=C:\\Users\\user\\Code\\JAVA\\MavenProj\\paths.txt'
expected_argline = '<argLine>-javaagent:{}\\agent.jar={}\\paths.txt</argLine>'.format(module,module)
repo = Repo.Repo(module)
curr_wd = os.getcwd()
os.chdir(module)
os.system('git checkout d363b828bc6e714aa5f4ffedfbd1d09e1880f9ee -f')
        mvn_help_cmd = 'mvn help:effective-pom -DartifactId=org.apache.maven.plugins:maven-surefire-plugin'
        poms = repo.get_all_pom_paths(module)
repo.add_argline_to_surefire(expected_content)
self.assertTrue(len(poms)>0)
for pom in poms:
print('#### checking '+pom+' ######')
if(os.path.normcase(os.path.join( os.getcwd(),r'tika-dotnet\pom.xml')) ==os.path.normcase(pom)):
print('#### passing ' + pom + ' ######')
continue
module_path = os.path.abspath(os.path.join(pom, os.pardir))
with os.popen(mvn_help_cmd+' -f '+module_path) as proc:
tmp_file_path = 'tmp_file.txt'
with open(tmp_file_path, "w+") as tmp_file:
duplicate_stdout(proc, tmp_file)
with open(tmp_file_path, "r") as tmp_file:
duplicate_stdout(proc, tmp_file)
build_report = tmp_file.read()
self.assertTrue(expected_argline in build_report)
os.system('git checkout HEAD -f')
os.chdir(curr_wd)
def test_setup_tracer(self):
module = os.path.join( os.getcwd(),r'static_files\MavenProj')
repo = Repo.Repo(module)
expected_agent_path = os.path.join(repo.repo_dir, 'agent.jar')
expected_paths_path = os.path.join(repo.repo_dir, 'paths.txt')
repo.setup_tracer()
self.assertTrue(os.path.isfile(expected_agent_path))
self.assertTrue(os.path.isfile(expected_paths_path))
with open(expected_paths_path,'rb') as paths:
lines = paths.readlines()
self.assertEqual(lines[0].replace('\n','').replace('\r', ''), os.path.join(os.environ['USERPROFILE'], r'.m2\repository'))
self.assertEqual(lines[1], repo.repo_dir)
with open(os.path.join(repo.repo_dir,'pom.xml'),'rb') as pom:
lines = pom.readlines()
self.assertTrue('<argLine>-javaagent:{}={}</argLine>'.format(expected_agent_path,expected_paths_path), os.path.join(os.environ['USERPROFILE'], r'.m2\repository'))
def test_get_traces_1(self):
excpected_testcase_trace = 'Trace_org.apache.commons.math3.analysis.differentiation.DerivativeStructureTest@testField_1533637414916'
excpected_trace_1 = 'org.apache.commons.math3.analysis.differentiation.DSCompiler#getFreeParameters'
excpected_trace_2 = 'org.apache.commons.math3.analysis.differentiation.DSCompiler#getPartialDerivativeIndex'
debugger_tests_src = os.path.join(os.getcwd(), r'static_files\DebuggerTests_commons_math')
debugger_tests_dst = os.path.join(os.getcwd(), r'DebuggerTests')
if not os.path.isdir(debugger_tests_dst):
shutil.copytree(debugger_tests_src, debugger_tests_dst)
module = os.path.join( os.getcwd(),r'static_files\commons-math')
repo = Repo.Repo(module)
res = repo.get_traces()
self.assertTrue(excpected_testcase_trace in res.keys())
self.assertTrue(excpected_trace_1 in res[excpected_testcase_trace])
self.assertTrue(excpected_trace_2 in res[excpected_testcase_trace])
shutil.rmtree(debugger_tests_dst)
def test_get_traces_2(self):
excpected_testcase_trace = 'Trace_org.apache.commons.math3.analysis.function.LogitTest@testDerivativesHighOrder_1533637432535'
not_excpected_testcase_trace = 'Trace_org.apache.commons.math3.analysis.differentiation.DerivativeStructureTest@testField_1533637414916'
debugger_tests_src = os.path.join(os.getcwd(), r'static_files\DebuggerTests_commons_math')
debugger_tests_dst = os.path.join(os.getcwd(), r'DebuggerTests')
if not os.path.isdir(debugger_tests_dst):
shutil.copytree(debugger_tests_src, debugger_tests_dst)
module = os.path.join( os.getcwd(),r'static_files\commons-math')
repo = Repo.Repo(module)
res = repo.get_traces('LogitTest')
self.assertTrue(excpected_testcase_trace in res.keys())
self.assertFalse(not_excpected_testcase_trace in res.keys())
shutil.rmtree(debugger_tests_dst)
def test_set_pom_tag_1(self):
module = os.path.join( os.getcwd(),r'static_files\tika\tika-parent')
pom = os.path.join(module, 'pom.xml')
repo = Repo.Repo(module)
curr_wd = os.getcwd()
os.chdir(module)
os.system('git checkout HEAD -f')
mvn_help_cmd = 'mvn help:describe -DgroupId=org.apache.maven.plugins -DartifactId=maven-surefire-plugin'
expected_version = '2.21.0'
        poms = repo.get_all_pom_paths(module)
# repo.change_pom(xquery=r"project\build\plugins[artifactId = 'maven-surefire-plugin']\version",
# value=expected_version)
repo.set_pom_tag(xquery = r"./build/plugins/plugin[artifactId = 'maven-surefire-plugin']/version",module=module, create_if_not_exist=True, value = expected_version)
root = ET.parse(pom).getroot()
xmlns, _ = mvn.tag_uri_and_name(root)
surfire_tag_singelton = root.findall(r"{}build/{}plugins/{}plugin[{}artifactId='maven-surefire-plugin']/{}version".format(xmlns,xmlns,xmlns,xmlns,xmlns))
self.assertEqual(len(surfire_tag_singelton) , 1)
self.assertEqual(surfire_tag_singelton[0].text, expected_version)
os.system('git checkout HEAD -f')
os.chdir(curr_wd)
def test_set_pom_tag_2(self):
module = os.path.join( os.getcwd(),r'static_files\tika\tika-parent')
xquery = r"./dependencyManagement/dependencies/dependency[artifactId = 'junit']/version"
pom = os.path.join(module, 'pom.xml')
repo = Repo.Repo(module)
curr_wd = os.getcwd()
os.chdir(module)
os.system('git checkout HEAD -f')
mvn_help_cmd = 'mvn help:describe -DgroupId=org.apache.maven.plugins -DartifactId=maven-surefire-plugin'
expected_version = '4.13.0'
        poms = repo.get_all_pom_paths(module)
# repo.change_pom(xquery=r"project\build\plugins[artifactId = 'maven-surefire-plugin']\version",
# value=expected_version)
repo.set_pom_tag(xquery = xquery,module=module, create_if_not_exist=True, value = expected_version)
root = ET.parse(pom).getroot()
xmlns, _ = mvn.tag_uri_and_name(root)
junit_tag_singelton = root.findall(r"{}dependencyManagement/{}dependencies/{}dependency[{}artifactId='junit']/{}version".format(xmlns,xmlns,xmlns,xmlns,xmlns))
self.assertEqual(len(junit_tag_singelton) , 1)
self.assertEqual(junit_tag_singelton[0].text, expected_version)
os.system('git checkout HEAD -f')
os.chdir(curr_wd)
@unittest.skip("Important test but will require some time to validate")
def test_get_compilation_error_testcases(self):
print('test_get_compilation_error_testcases')
with open(os.getcwd() + r'\static_files\test_get_compilation_error_testcases_report.txt', 'r') as report_file:
report = report_file.read()
commit = [c for c in Main.all_commits if c.hexsha == 'a71cdc161b0d87e7ee808f5078ed5fefab758773'][0]
parent = commit.parents[0]
module_path = os.getcwd() + r'\tested_project\MavenProj\sub_mod_1'
Main.repo.git.reset('--hard')
Main.repo.git.checkout(commit.hexsha)
commit_tests = Main.test_parser.get_tests(module_path)
commit_testcases = Main.test_parser.get_testcases(commit_tests)
expected_not_compiling_testcase = [t for t in commit_testcases if 'MainTest#gooTest' in t.mvn_name][0]
Main.prepare_project_repo_for_testing(parent, module_path)
commit_new_testcases = Main.get_commit_created_testcases(commit_testcases)
compolation_error_testcases = Main.get_compilation_error_testcases(report, commit_new_testcases)
self.assertTrue(expected_not_compiling_testcase in compolation_error_testcases,
"'MainTest#gooTest should have been picked as for compilation error")
def test_exclusive_testing(self):
module = os.path.join(os.getcwd(), r'static_files\MavenProj')
repo = Repo.Repo(module)
tests = repo.get_tests()
white_list = tests[3:4]
mvn_names = list( map( lambda t: t.mvn_name, white_list ) )
repo.clean()
repo.test(tests = white_list)
reports = repo.get_tests_reports()
report_names = list(map(lambda r: os.path.basename(r.xml_path), reports))
        resetEnvironment()
self.assertEquals(len(reports) , len(white_list))
for mvn_name in mvn_names:
self.assertTrue( ('TEST-'+mvn_name+'.xml') in report_names)
def test_setup_test_generations_1(self):
expected_testclass_id = os.getcwd() + r'\static_files\MavenProj\sub_mod_1\src\test\java\p_1\Amit_ESTest.java'
module = os.path.join(os.getcwd(), r'static_files\MavenProj\sub_mod_1')
repo = Repo.Repo(module)
repo.clean()
repo.generate_tests()
tests = repo.get_tests()
self.assertTrue(expected_testclass_id in list(map(lambda t: t.src_path, tests)))
def test_setup_test_generations_exclusive_classes_list(self):
expected_testclass_id = os.getcwd() + r'\static_files\MavenProj\sub_mod_1\src\test\java\p_1\Amit_ESTest.java'
expected_not_testclass_id = os.getcwd() + r'\static_files\MavenProj\sub_mod_1\src\test\java\Main_ESTest.java'
module = os.path.join(os.getcwd(), r'static_files\MavenProj\sub_mod_1')
repo = Repo.Repo(module)
repo.clean()
repo.generate_tests(classes = ['p_1.Amit'])
tests = repo.get_tests()
self.assertTrue(expected_testclass_id in list(map(lambda t: t.src_path, tests)))
self.assertFalse(expected_not_testclass_id in list(map(lambda t: t.src_path, tests)))
def test_exclusive_testing_long_lists_of_tests(self):
module = os.path.join(os.getcwd(), r'static_files\commons-math')
repo = Repo.Repo(module)
tests = repo.get_tests()[:20]
white_list = tests
mvn_names = list( map( lambda t: t.mvn_name, white_list ) )
repo.clean()
tmp = repo.test(tests = white_list)
reports = repo.get_tests_reports()
report_names = list(map(lambda r: os.path.basename(r.xml_path), reports))
self.assertEquals(len(reports) , len(white_list))
for mvn_name in mvn_names:
self.assertTrue( ('TEST-'+mvn_name+'.xml') in report_names )
def resetEnvironment():
os.system('mvn clean install -fn -f '+os.getcwd() + r'\static_files\MavenProj')
os.system('mvn clean install -fn -f ' + os.getcwd() + r'\static_files\tika_1')
def duplicate_stdout(proc, file):
while(True):
line = proc.readline()
if line == '':
break
sys.stdout.write(line)
file.write(line)
if __name__ == '__main__':
    resetEnvironment()
unittest.main() | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/Test.py | Test.py |
import os
from pom_file import PomValue
from subprocess import Popen
class JcovTracer(object):
"""
<artifactId>maven-surefire-plugin</artifactId>
<version>2.18.1</version>
<configuration>
- <argLine>-Xmx2048m</argLine>
- </configuration>
+ <argLine>"-javaagent:C:\Users\User\Documents\GitHub\jcov\JCOV_BUILD\jcov_3.0\jcov.jar=grabber,include_list=C:\Users\User\Documents\GitHub\jcov\classes_file.txt"</argLine>
+ <additionalClasspathElements>
+ <additionalClasspathElement>C:\Users\User\Documents\GitHub\jcov\JCOV_BUILD\jcov_3.0\listener.jar</additionalClasspathElement>
+ </additionalClasspathElements>
+
+ <properties>
+ <property>
+ <name>listener</name>
+ <value>com.sun.tdk.listener.JUnitExecutionListener</value>
+ </property>
+ </properties>
+ </configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
"""
JCOV_JAR_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "externals", "jcov.jar")
LISTENER_JAR_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "externals", "listener.jar")
LISTENER_CLASS = "com.sun.tdk.listener.JUnitExecutionListener"
def __init__(self, classes_dir, path_to_out_template=None, path_to_classes_file=None, path_to_result_file=None, class_path=None, instrument_only_methods=True):
self.classes_dir = classes_dir
self.path_to_out_template = path_to_out_template
self.path_to_classes_file = path_to_classes_file
self.path_to_result_file = path_to_result_file
self.class_path = class_path
self.instrument_only_methods = instrument_only_methods
self.agent_port = str(self.get_open_port())
self.command_port = str(self.get_open_port())
def get_open_port(self):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def template_creator_cmd_line(self):
cmd_line = ['java', '-jar', JcovTracer.JCOV_JAR_PATH, 'tmplgen', '-verbose']
if self.class_path :
cmd_line.extend(['-cp', self.class_path])
if self.path_to_out_template:
cmd_line.extend(['-t', self.path_to_out_template])
if self.path_to_classes_file:
cmd_line.extend(['-c', self.path_to_classes_file])
if self.instrument_only_methods:
cmd_line.extend(['-type', 'method'])
cmd_line.extend(self.get_classes_path())
return cmd_line
def grabber_cmd_line(self):
cmd_line = ['java', '-jar', JcovTracer.JCOV_JAR_PATH, 'grabber', '-vv', '-port', self.agent_port, '-command_port', self.command_port]
if self.path_to_out_template:
cmd_line.extend(['-t', self.path_to_out_template])
if self.path_to_result_file:
cmd_line.extend(['-o', self.path_to_result_file])
return cmd_line
def get_agent_arg_line(self):
arg_line = r'-javaagent:{JCOV_JAR_PATH}=grabber,port={PORT}'.format(JCOV_JAR_PATH=JcovTracer.JCOV_JAR_PATH, PORT=self.agent_port)
if self.path_to_classes_file:
arg_line += r',include_list={CLASSES_FILE}'.format(CLASSES_FILE=self.path_to_classes_file)
if self.path_to_out_template:
arg_line += r',template={0}'.format(self.path_to_out_template)
if self.instrument_only_methods:
arg_line += r',type=method'
return PomValue("maven-surefire-plugin", ["configuration", "argLine"], '"{0}"'.format(arg_line))
def get_classes_path(self):
all_classes = [self.classes_dir]
for root, dirs, files in os.walk(self.classes_dir):
if "target" in dirs:
classes_path = os.path.join(root, "target", "classes")
if os.path.exists(classes_path):
all_classes.append(classes_path)
return all_classes
@staticmethod
def static_values_to_add_to_pom():
return [PomValue("maven-surefire-plugin", ["configuration", "properties", "property", "name"], "listener"),
PomValue("maven-surefire-plugin", ["configuration", "properties", "property", "value"], JcovTracer.LISTENER_CLASS),
PomValue("maven-surefire-plugin", ["configuration", "additionalClasspathElements", "additionalClasspathElement"], JcovTracer.LISTENER_JAR_PATH),
PomValue("maven-surefire-plugin", ["version"], "2.18.1")]
def get_enviroment_variables_values(self):
return [PomValue("maven-surefire-plugin", ["configuration", "forkMode"], "always"),
PomValue("maven-surefire-plugin", ["configuration", "environmentVariables", "JcovGrabberCommandPort"], self.command_port)]
def get_values_to_add(self):
return JcovTracer.static_values_to_add_to_pom() + [self.get_agent_arg_line()] + self.get_enviroment_variables_values()
def stop_grabber(self):
Popen(["java", "-jar", JcovTracer.JCOV_JAR_PATH, "grabberManager", "-save",'-command_port', self.command_port]).communicate()
Popen(["java", "-jar", JcovTracer.JCOV_JAR_PATH, "grabberManager", "-stop", '-command_port', self.command_port]).communicate()
def execute_jcov_process(self):
Popen(self.template_creator_cmd_line()).communicate()
Popen(self.grabber_cmd_line()) | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/jcov_tracer.py | jcov_tracer.py |
import csv
import os
import subprocess
import sys
from threading import Timer
from cStringIO import StringIO
import javalang
from bug import BugError
import TestObjects
import CompilationErrorObject
tracer_dir = os.path.join(os.path.dirname(__file__), r'tracer\java_tracer\tracer')
dict_super_sub_tags = {'dependencies':'dependency',
'mailingLists':'mailingList',
'licenses':'license',
'developers':'developer',
'plugins': 'plugin'}
# Returns the testcases that generated compilation errors in the maven build report
def get_compilation_error_testcases(compilation_error_report):
ans = []
for line in compilation_error_report:
if is_error_report_line(line):
compilation_error_testcase = get_error_test_case(line)
if not compilation_error_testcase == None and not compilation_error_testcase in ans:
ans.append(compilation_error_testcase)
return ans
# Returns a list of compilation error report objects
def get_compilation_errors(compilation_error_report):
ans = []
for line in compilation_error_report:
if is_error_report_line(line):
error = CompilationErrorObject.CompilationErrorReport(line)
if not error in ans:
ans.append(error)
return ans
# Returns the list of lines describing the compilation errors in the build report
def get_compilation_error_report(build_report):
ans = []
report_lines = build_report.splitlines()
i = 0
while i < len(report_lines):
if is_start_of_compilation_error_report(report_lines[i]):
ans.append(report_lines[i])
if '[ERROR] COMPILATION ERROR :' in report_lines[i]:
ans.append(report_lines[i + 1])
i += 1
i += 1
while not end_of_compilation_errors(report_lines[i]):
ans.append(report_lines[i])
i += 1
if i == len(report_lines)-1:
break
elif report_lines[i].endswith('Compilation failure'):
while i < len(report_lines) and not end_of_compilation_errors(report_lines[i]):
if is_error_report_line(report_lines[i]):
ans.append(report_lines[i])
i += 1
else:
i += 1
return ans
# Returns true if the line is a start of a compilation error report
def is_start_of_compilation_error_report(line):
return '[ERROR] COMPILATION ERROR :' in line or\
('[ERROR] Failed to execute goal' in line and 'Compilation failure' in line)
# Gets the test case associated with the compilation error
def get_error_test_case(line):
ans = None
path = ''
error_address = ''
parts = line.split(' ')
path_and_error_address = parts[1].split(':')
error_address = path_and_error_address[len(path_and_error_address) - 1]
error_line = int(error_address.strip('[]').split(',')[0])
path = ':'.join(path_and_error_address[:-1])
if path.startswith('/') or path.startswith('\\'):
path = path[1:]
return get_line_testcase(path, error_line)
# Returns true if the given line ends the compilation error report
def end_of_compilation_errors(line):
return '[INFO] -------------------------------------------------------------' in line or\
'[INFO] Build failures were ignored.' in line or '[ERROR] -> [Help 1]' in line
# Returns true iff the given report line reports a compilation error in a source file
def is_error_report_line(line):
if line.startswith('[ERROR]'):
words = line.split(' ')
if len(words) < 2:
return False
if len(words[1]) < 1:
return False
if words[1][0] == '/':
words[1] = words[1][1:]
if not ':' in words[1]:
return False
if words[1].find('.java') == -1:
return False
should_be_a_path = words[1][:words[1].find('.java') + len('.java')]
return os.path.isfile(should_be_a_path)
return False
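# Hedged example (illustration only): the shape of a compiler error line that
# is_error_report_line() recognises and get_error_test_case() parses. The path
# is hypothetical; note that the check also requires the .java file to exist on
# disk, so this exact line would only match on a machine that has it.
_EXAMPLE_COMPILATION_ERROR_LINE = ('[ERROR] /C:/repo/module/src/test/java/p_1/FooTest.java:'
                                   '[17,8] cannot find symbol')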
# Returns all testcases of given test classes
def get_testcases(test_classes):
ans = []
for test_class in test_classes:
ans += test_class.testcases
return ans
# Returns TestCase object representing the testcase in file_path that contains line
def get_line_testcase(path, line):
    if not os.path.isfile(path):
        raise IOError('No such file: ' + path)
    if not path.endswith('.java'):
        raise TestObjects.TestParserException('Cannot parse files that are not java files')
    testclass = TestObjects.TestClass(path)
    class_decl = get_compilation_error_class_decl(testclass.tree, line)
    method = get_compilation_error_method(testclass.tree, line)
    return TestObjects.TestCase(method, class_decl, testclass)
# Returns the method name of the method containing the compilation error
def get_compilation_error_method(tree, error_line):
ans = None
for path, node in tree.filter(javalang.tree.ClassDeclaration):
for method in node.methods:
if get_method_line_position(method) < error_line:
if ans == None:
ans = method
elif get_method_line_position(ans) < get_method_line_position(method):
ans = method
return ans
# Returns the method name of the method containing the compilation error
def get_compilation_error_class_decl(tree, error_line):
ans = None
for path, node in tree.filter(javalang.tree.ClassDeclaration):
if get_class_line_position(node) < error_line:
if ans == None:
ans = node
elif get_class_line_position(node) < get_class_line_position(node):
ans = node
return ans
# Returns the line in which the method starts
def get_method_line_position(method):
return method.position[0]
# Returns the line in which the class starts
def get_class_line_position(class_decl):
return class_decl.position[0]
def export_as_csv(tests):
with open('all_tests.csv', 'a') as csvfile:
fieldnames = ['test_name', 'time']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, lineterminator='\n')
writer.writeheader()
for test in tests:
writer.writerow({'test_name': test.name, 'time': str(test.time)})
def get_mvn_exclude_tests_list(tests, time):
count = 0
ans = '-Dtest='
for test in tests:
if test.time > time:
if ans[len(ans) - 1] != '=':
ans += ','
ans += '!' + test.name
count += 1
return ans
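# Hedged example (illustration only): building a surefire exclusion string for
# every test slower than one second, using lightweight stand-ins for the test
# objects this module normally receives.
def _example_exclude_slow_tests():
    class _FakeTest(object):
        def __init__(self, name, time):
            self.name, self.time = name, time
    tests = [_FakeTest('FooTest', 2.5), _FakeTest('BarTest', 0.1), _FakeTest('BazTest', 3.0)]
    assert get_mvn_exclude_tests_list(tests, 1.0) == '-Dtest=!FooTest,!BazTest'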
# changes the plugin version of 'plugin_artifact_id' to 'version'. Does nothing if the 'plugin_artifact_id' is not in plugins_tag
def change_plugin_version_if_exists(plugins_tag, plugin_artifact_id, version):
plugin_p = None
for plugin in plugins_tag.getElementsByTagName('plugin'):
arifact_id_sing = list(filter(lambda child: child.localName == 'artifactId', plugin.childNodes))
if len(arifact_id_sing) == 0:
return
assert len(arifact_id_sing) == 1
if arifact_id_sing[0].firstChild.data == plugin_artifact_id:
plugin_p = plugin
break
if plugin_p == None:
return
version_v = None
surefire_version_sing = list(
filter(lambda child: child.localName == 'version', plugin_p.childNodes))
if len(surefire_version_sing) == 0:
new_ver = plugin_p.ownerDocument.createElement(tagName='version')
new_ver_text = new_ver.ownerDocument.createTextNode(version)
new_ver.appendChild(new_ver_text)
plugin_p.appendChild(new_ver)
surefire_version_sing = [new_ver]
assert len(surefire_version_sing) == 1
version_v = surefire_version_sing[0]
version_v.firstChild.data = version
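# Hedged example (illustration only): applying change_plugin_version_if_exists()
# to a minimal <plugins> fragment parsed with xml.dom.minidom. The snippet and
# version numbers are hypothetical.
def _example_change_surefire_version():
    from xml.dom import minidom
    doc = minidom.parseString(
        '<plugins><plugin><artifactId>maven-surefire-plugin</artifactId>'
        '<version>2.12.4</version></plugin></plugins>')
    change_plugin_version_if_exists(doc.documentElement, 'maven-surefire-plugin', '2.21.0')
    return doc.toxml()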
# Generates the maven class name for a given source path
def generate_mvn_class_names(src_path, module):
    if 'src\\test' in src_path or 'src/test' in src_path:
relpath = os.path.relpath(src_path, module + '\\src\\test\\java').replace('.java', '')
else:
relpath = os.path.relpath(src_path, module + '\\src\\main\\java').replace('.java', '')
while relpath.startswith('..\\'):
relpath = relpath[3:]
return relpath.replace('\\', '.')
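# Hedged example (illustration only): deriving the maven class name of a test
# from its source path and module root. Paths are Windows-style, as assumed
# throughout this module, so the expected output only holds on Windows.
def _example_generate_mvn_class_names():
    module = r'C:\repo\sub_mod_1'
    src = module + r'\src\test\java\p_1\AmitTest.java'
    print generate_mvn_class_names(src, module)  # expected (on Windows): p_1.AmitTest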
# changes the plugin version of 'plugin_artifact_id' to 'version'. Does nothing if the 'plugin_artifact_id' is not in plugins_tag
def add_plugin_configuration_argline(plugins_tag, plugin_artifact_id, content):
plugin_p = None
for plugin in plugins_tag.getElementsByTagName('plugin'):
arifact_id_sing = list(filter(lambda child: child.localName == 'artifactId', plugin.childNodes))
if len(arifact_id_sing) == 0:
return
assert len(arifact_id_sing) == 1
if arifact_id_sing[0].firstChild.data == plugin_artifact_id:
plugin_p = plugin
break
if plugin_p == None:
return
version_v = None
surefire_configuration_sing = list(
filter(lambda child: child.localName == 'configuration', plugin_p.childNodes))
if len(surefire_configuration_sing) == 0:
new_configuration = plugin_p.ownerDocument.createElement(tagName='configuration')
plugin_p.appendChild(new_configuration)
surefire_configuration_sing = [new_configuration]
assert len(surefire_configuration_sing) == 1
configuration_tag = surefire_configuration_sing[0]
surefire_argLine_sing = list(
filter(lambda child: child.localName == 'argLine', configuration_tag.childNodes))
if len(surefire_argLine_sing) == 0:
new_argLine = configuration_tag.ownerDocument.createElement(tagName='argLine')
new_argLine_text = new_argLine.ownerDocument.createTextNode('')
new_argLine.appendChild(new_argLine_text)
configuration_tag.appendChild(new_argLine)
surefire_argLine_sing = [new_argLine]
new_argLine = surefire_argLine_sing[0]
new_argLine.firstChild.data = content
# Returns test class objects found in the given tests directory
def parse_tests(tests_dir):
ans = []
for filename in os.listdir(tests_dir):
abs_path = os.path.join(tests_dir, filename)
if os.path.isdir(abs_path):
ans.extend(parse_tests(abs_path))
elif filename.endswith(".java"):
ans.append(TestObjects.TestClass(abs_path))
return ans
def wrap_mvn_cmd(cmd, time_limit = sys.maxsize, dir=None):
output_tmp_files_dir = os.path.join('tmp_files','stdout_duplication')
if not os.path.isdir(output_tmp_files_dir):
os.makedirs(output_tmp_files_dir)
tmp_file_path = os.path.join(output_tmp_files_dir,'tmp_file.txt')
with open(tmp_file_path, 'w+') as tmp_f:
proc = subprocess.Popen(cmd, shell=True,stdout=tmp_f, cwd=dir)
t = Timer(time_limit, kill, args=[proc])
t.start()
proc.wait()
t.cancel()
with open(tmp_file_path, "r") as tmp_f:
build_report = tmp_f.read()
print(build_report)
if time_limit != sys.maxsize and not ('[INFO] BUILD SUCCESS' in build_report or '[INFO] BUILD FAILURE' in build_report):
raise MVNTimeoutError('Build took too long', build_report)
#if has_compilation_error(build_report):
# raise MVNTimeoutError('Build report has compilation error', build_report)
return build_report.replace('\\n','\n')
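# Hedged usage sketch (not part of the original module): runs a Maven build with a
# 30-minute cap and distinguishes a timeout from an ordinary failed build. It assumes
# `mvn` is on the PATH; the project directory is an illustrative placeholder.
def _example_run_build(project_dir='path/to/project'):
    try:
        report = wrap_mvn_cmd('mvn clean test', time_limit=30 * 60, dir=project_dir)
    except MVNTimeoutError as e:
        return 'timeout: ' + e.msg
    return 'success' if '[INFO] BUILD SUCCESS' in report else 'failure'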
def wrap_mvn_cmd_1(cmd, time_limit = sys.maxsize):
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
t = Timer(time_limit, kill, args=[proc])
t.start()
(out, err) = proc.communicate()
t.cancel()
build_log = out.decode() if isinstance(out, bytes) else out
if time_limit != sys.maxsize and not ('[INFO] BUILD SUCCESS' in build_log or '[INFO] BUILD FAILURE' in build_log):
raise MVNError('Build took too long', build_log)
return build_log
def wrap_mvn_cmd_3(cmd, time_limit = sys.maxsize):
sys.stderr.flush()
sys.stdout.flush()
olderr, oldout = sys.stderr, sys.stdout
try:
sys.stderr = StringIO()
sys.stdout = StringIO()
try:
proc = subprocess.Popen(cmd, shell=True)
t = Timer(time_limit, kill, args=[proc])
t.start()
proc.wait()
t.cancel()
finally:
sys.stderr.seek(0)
sys.stdout.seek(0)
err = sys.stderr.read()
build_log = sys.stdout.read()
finally:
sys.stderr = olderr
sys.stdout = oldout
if time_limit != sys.maxsize and not ('[INFO] BUILD SUCCESS' in build_log or '[INFO] BUILD FAILURE' in build_log):
raise MVNError('Build took too long', build_log)
return build_log
def duplicate_stdout(proc, file):
while (True):
line = proc.readline()
if line == '':
break
sys.stdout.write(line)
file.write(line)
# Used to kill a Maven process if it takes too long
def kill(p):
p.kill()
class MVNError(Exception):
def __init__(self, msg, report = ''):
self.msg = msg
self.report = report
def __str__(self):
return repr(self.msg+'\n'+self.report)
class MVNTimeoutError(MVNError):
pass
def has_compilation_error(build_report):
compilation_error_report = get_compilation_error_report(build_report)
return len(compilation_error_report)>0
def tag_uri_and_name(elem):
if elem.tag[0] == "{":
uri, ignore, tag = elem.tag[1:].partition("}")
else:
uri = ''
tag = elem.tag
return uri, tag | AIDnD-mvnpy | /AIDnD_mvnpy-1.0.1.tar.gz/AIDnD_mvnpy-1.0.1/mvnpy/mvn.py | mvn.py |
from keras.callbacks import History, Callback
from keras.optimizers import Optimizer
from abc import abstractmethod, ABCMeta
from typing import Any
from env import Env
class Agent(metaclass=ABCMeta):
"""深度学习智能体。
作为模型核心,智能体实现强化学习算法,定义统一的抽象接口(基于 OpenAI Gym 接口)。
智能体与环境交互,首先观察环境的状态,基于观察结果、通过执行动作改变环境。
"""
def get_config(self) -> dict:
"""获取智能体配置。
Returns
----
智能体配置。
"""
return {}
@abstractmethod
def compile(self, optimizer: Optimizer, metrics: list, configs: dict):
"""合成智能体。
Args
----
+ optimizer(`keras.optimizer.Optimizer`): 训练期间的优化器;
+ metrics(list of `lambda y_true, y_pred: metric`): 训练期间的评估器;
+ configs(dict of additional arguments): 其他相关参数配置;
"""
raise NotImplementedError()
@abstractmethod
def fit(self, env: Env, nb_steps: int, action_repetition: int, callbacks: Callback,
verbose: int, visualize: bool, nb_max_start_steps: int,
start_step_policy: Any, log_interval: int,
nb_max_episode_steps: int) -> History:
"""在给定环境中训练智能体。
Args
----
+ env(`Env`实例): 深度学习环境;
+ nb_steps(int): 最大训练回合步数;
+ action_repetition(int): 智能体在不观察环境时重复相同动作的次数 (如果一个动作对环境影响很小,设为 >1会有帮助);
+ callbacks(list of `keras.callbacks.Callback`): 训练期间的回调函数列表 ;
+ verbose(int): 控制台日志等级(0 为无日志, 1 为间隔日志, 2 为每局训练日志);
+ visualize(bool): 是否在训练期间可视化环境(开启可视化会降低训练速度,通常用于模型调试);
+ nb_max_start_steps(int): 智能体在每局训练开始、按照`start_step_policy`执行的最大步数;
+ start_step_policy(`lambda observation: action`): 每局起始步数内采用的动作策略;
+ log_interval(int): 日志间隔步数(当`verbose`=1生效);
+ nb_max_episode_steps(int): 每局训练在自动重置前可以执行的最大步数,若为`None`表示无限进行下去、直到环境终止;
Returns
----
History 实例,包含整个训练过程的信息。
"""
raise NotImplementedError()
@abstractmethod
def test(self, env: Env, nb_episodes: int, action_repetition: int,
callbacks: Callback, visualize: bool, nb_max_episode_steps: int,
nb_max_start_steps: int, start_step_policy: Any, verbose: int) -> History:
"""模型测试。
Args
----
+ env(`Env`实例): 深度学习环境;
+ nb_episodes(int): 最大训练局数;
+ action_repetition(int): 智能体在不观察环境时重复相同动作的次数 (如果一个动作对环境影响很小,设为 >1会有帮助);
+ callbacks(list of `keras.callbacks.Callback`): 训练期间的回调函数列表;
+ visualize(bool): 是否在训练期间可视化环境(开启可视化会降低训练速度,通常用于模型调试);
+ nb_max_episode_steps(int): 每局训练在自动重置前可以执行的最大步数,若为`None`表示无限进行下去、直到环境终止;
+ nb_max_start_steps(int): 智能体在每局训练开始、按照`start_step_policy`执行的最大步数;
+ start_step_policy(`lambda observation: action`): 每局起始步数内采用的动作策略;
+ verbose(int): 控制台日志等级(0 为无日志, 1 为间隔日志, 2 为每局训练日志);
Returns
----
History 实例,包含整个训练过程的信息。
"""
raise NotImplementedError()
@abstractmethod
def reset_states(self):
"""重置所有内部状态。
每局训练结束后,重置模型所有内部保持的状态。
"""
raise NotImplementedError()
@abstractmethod
def forward(self, observation: object) -> object:
"""计算下一步要执行的动作。
基于当前环境观察状态,生成下一步动作。如果智能体策略由神经网络实现,这时对应一次前向计算。
Args
----
+ observation(object): 当前环境观察状态;
Returns
----
下一步动作。
"""
raise NotImplementedError()
@abstractmethod
def backward(self, reward: float, terminal: bool) -> list:
"""更新智能体。
在执行上一步动作后,根据奖励更新智能体。若智能体策略由神经网络实现,则对应一次反向传播的权值更新。
Args
----
+ reward(float): 通过执行智能体动作后获得的当前回合的奖励;
+ terminal(bool): 训练是否结束;
Returns
----
智能体评估值列表。
"""
raise NotImplementedError()
@abstractmethod
def load_weights(self, filepath: str):
"""Load the agent's weights from an HDF5 file.
Args
----
+ filepath(str): path of the agent's HDF5 weight file;
"""
raise NotImplementedError()
@abstractmethod
def save_weights(self, filepath: str, overwrite: bool = False):
"""保存智能体权重文件到HDF5文件。
Args
----
+ filepath(str): 智能体权重hdf5文件路径;
+ overwrite(bool): 是否覆盖重写已有的权重文件(如果`False`且`filepath`已经存在,则抛出异常);
"""
raise NotImplementedError()
@property
def layers(self) -> list:
"""模型所有隐藏层列表。
如果模型使用多个内部模型实现,则以连接列表的形式返回。
"""
raise NotImplementedError()
@property
def metrics_names(self) -> list:
"""智能体使用的评估器名称列表。
"""
return []
@property
def _on_train_begin(self):
"""训练开始前调用的回调函数。
"""
raise NotImplementedError()
@property
def _on_train_end(self):
"""训练结束后调用的回调函数。
"""
raise NotImplementedError()
@property
def _on_test_begin(self):
"""测试开始前调用的回调函数。
"""
raise NotImplementedError()
@property
def _on_test_end(self):
"""测试结束后调用的回调函数。
"""
raise NotImplementedError() | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/controllers/agent.py | agent.py |
class Processor(object):
"""状态动作处理器。
处理器充当 `Agent` 和 `Env` 之间的耦合机制。如果`Agent`对观察、行动和对环境的回报有不同的格式或数据要求,
通过实现自定义处理器,可以无需更改智能体或环境的底层实现的情况下完成两者之间有效地转换。
通常将 `Processor` 作为参数传入智能体,从而为 `Agent` 提供与环境交互耦合的工具。
"""
def process_step(self, observation: object, reward: float, done: bool,
info: dict) -> tuple:
"""通过处理/转换观察结果、奖励、是否结束、训练信息,完成一个完整的训练回合。
Args
----
+ observation(object): 智能体观察到的当前环境状态;
+ reward(float): 执行动作后获得的奖励值;
+ done(bool): 当前回合是否结束;
+ info(dict): 辅助诊断信息;
Returns
----
经过处理后的元组 (observation, reward, done, reward)。
"""
observation = self.process_observation(observation)
reward = self.process_reward(reward)
info = self.process_info(info)
return observation, reward, done, info
def process_observation(self, observation: object) -> object:
"""处理/转换直接观察的环境状态为智能体需要的格式/数据。
Args
----
+ observation(object): 智能体观察到的当前环境状态;
Returns
----
observation(object): 经过处理后的环境观察结果。
"""
raise NotImplementedError()
def process_reward(self, reward: float) -> float:
"""处理/转换直接返回的动作奖励为智能体需要的格式/数据。
Args
----
+ reward(float): 执行动作后获得的奖励值;
Returns
----
reward(float): 经过处理后的动作奖励。
"""
raise NotImplementedError()
def process_info(self, info: dict) -> dict:
"""处理/转换直接返回的训练信息为智能体需要的格式/数据。
Args
----
+ info(dict): 训练过程中辅助诊断信息;
Returns
----
info(dict): 经过处理后的训练信息。
"""
raise NotImplementedError()
def process_action(self, action: object) -> object:
"""处理/转换智能体预报的原始动作为环境中可以执行的动作。
Args
----
+ action(object): 智能体预报的下一个动作;
Returns
----
action(object): 经过处理后可以再环境中执行的动作。
"""
raise NotImplementedError()
def process_state_batch(self, batch: list) -> list:
"""处理整个批次的状态值。
Args
----
+ batch(list): 观察到的状态值列表。
Returns
----
batch(list): 经处理的状态值列表。
"""
raise NotImplementedError()
@property
def metrics(self) -> list:
"""处理器中的评估器列表(List of `lambda y_true, y_pred: metric` functions)。
"""
return []
def metrics_names(self) -> list:
"""处理器中的评估器名称列表。
"""
return [] | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/controllers/processor.py | processor.py |
import numpy as np
from abc import abstractmethod, ABCMeta
class Env(metaclass=ABCMeta):
"""深度学习环境。
作为项目中所有 `Agent` 交互学习的环境,定义统一的抽象接口(基于 OpenAI Gym 接口)。
通常将 `Env` 作为参数传入深度学习智能体,为 `Agent` 的训练提供交互环境。
"""
reward_range = (-np.inf, np.inf)
action_space = None
observation_space = None
@abstractmethod
def step(self, action: object) -> tuple:
"""执行一个时间步下的环境交互。
Args
----
+ action(object): 由智能体提供的动作;
Returns
----
Tuple (observation, reward, done, info), which:
+ observation(object), 智能体观察的当前环境状态;
+ reward(float), 执行动作后获得的奖励值;
+ done(bool), 当前回合是否结束;
+ info(dict), 辅助诊断信息;
"""
raise NotImplementedError()
@abstractmethod
def reset(self) -> object:
"""重置环境状态并返回初始观察。
Returns
----
Observation(object): 环境状态空间的初始观察(初始奖励设为 0)。
"""
raise NotImplementedError()
@abstractmethod
def close(self):
"""关闭环境并清理内存。
"""
raise NotImplementedError()
@abstractmethod
def render(self, mode: str = 'human', close: bool = False):
"""渲染环境。
支持的渲染模型因各自环境的实现而定,甚至可能完全不支持任何渲染。
Args
----
+ mode(str): 渲染模式;
+ close(bool): 是否关闭所有渲染;
"""
raise NotImplementedError()
@abstractmethod
def seed(self, seed: list = None) -> list:
"""设置环境中随机数生成器种子。
Args
----
+ seed(list of int) : 随机数生成器种子;
Returns
----
环境中随机数生成器使用的种子的列表。
"""
raise NotImplementedError()
@abstractmethod
def setup(self, *args, **kwargs):
"""提供环境运行时配置。
配置中应该包含环境如何运行的相关信息/数据(比如远程服务器地址,数据路径等),
但是同时不能影响环境语义。
"""
raise NotImplementedError()
@abstractmethod
def load(self, filepath: str):
"""Load the environment from a local file.
Args
----
+ filepath(str): path of the saved environment file;
"""
raise NotImplementedError()
@abstractmethod
def save(self, filepath: str, overwrite: bool = False):
"""保存环境模型到本地文件。
Args
----
+ filepath(str): 环境模型保存文件路径;
+ overwrite(bool): 是否覆盖重写已有的权重文件(如果`False`且`filepath`已经存在,则抛出异常);
"""
raise NotImplementedError()
@property
def states(self) -> list:
"""环境当前所有状态。
"""
raise NotImplementedError()
@property
def layers(self) -> list:
"""环境模型所有隐藏层列表。
如果模型使用多个内部模型实现,则以连接列表的形式返回。
"""
raise NotImplementedError()
@property
def configs(self) -> dict:
"""
Get the environment configuration.
"""
raise NotImplementedError()
def __del__(self):
self.close()
def __str__(self):
return '<{} instance>'.format(type(self).__name__) | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/controllers/env.py | env.py |
import warnings
from typing import Any
import numpy as np
from tensorflow.keras import models, layers, optimizers # import these interfaces uniformly from keras
from rl.agents import DDPGAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
from rl.core import Agent # upstream keras-rl version, not the local one
from rl.core import Env # upstream keras-rl version, not the local one
class DdpgAgent(Agent):
"""采用 DDPG 算法的深度强化学习引擎.
这里实际上是对 DDPGAgent的封装。
"""
def __init__(self, Id: str):
self._id = Id
self._configs = {} # reinforcement-learning configuration
self._agent = None # reinforcement-learning engine
self._memory = None # replay memory
self._nb_actions = None # number of actions
self._actions_shape = None # shape of the action data
self._obs_shape = None # shape of the observation data
def init(self, action_space_shape: tuple, obs_space_shape: tuple):
"""初始化模型。
为了能够更加自由的组装强化学习控制器,将初始化方法独立出来。
Args
----
+ action_space_shape(tuple): 环境动作状态空间数据形状;
+ obs_space_shape(tuple): 环境观察状态空间数据形状;
"""
# 动作数量
self._nb_actions = action_space_shape[0]
# 一维结构
self._actions_shape = (self._nb_actions, )
# 二维结构
self._obs_shape = (1, ) + obs_space_shape
def set_actor_configs(self, dense_units: list, activations: list):
"""设置 “行动者” 神经网络参数。
Args
----
+ dense_units(list of int): 神经网络中各全连接层的单元数量(不含最后输出层);
+ activations(list of str): 神经网络中各全连接层的激活函数(不含最后输出层);
"""
assert len(dense_units) > 1 or len(
activations) > 1, "Empty actor neural network settings."
assert len(dense_units) == len(
activations
), "Number of units and activations of actor neural network dosen't match."
nb = self._nb_actions
if min(dense_units) < nb:
warnings(f"Actor neural network dense units should bigger than {nb}.")
units = [max(n, nb) for n in dense_units]
self._configs['actor_units'] = units
self._configs['actor_activations'] = activations
def _build_actor_nn(self):
"""搭建 “行动者” 神经网络用于逼近策略函数,生成环境动作。
“行动者” 模型输入当前环境状态,输出下一步要执行的动作。
"""
actor = models.Sequential()
actor.add(layers.Flatten(input_shape=self._obs_shape))
units = self._configs['actor_units']
funcs = self._configs['actor_activations']
for units, activation in zip(units, funcs):
actor.add(layers.Dense(units))
actor.add(layers.Activation(activation))
actor.add(layers.Dense(self._nb_actions))
actor.add(layers.Activation('sigmoid'))
return actor
def set_critic_configs(self, dense_units: list, activations: list):
"""设置 “评估者” 神经网络参数。
Args
----
+ dense_units(list of int): 神经网络中各全连接层的单元数量(不含最后输出层);
+ activations(list of str): 神经网络中各全连接层的激活函数(不含最后输出层);
"""
assert len(dense_units) > 1 or len(
activations) > 1, "Empty critic neural network settings."
assert len(dense_units) == len(
activations
), "Number of units and activations of critic neural network dosen't match."
assert min(dense_units) > 0, "Negative critic network dense units."
self._configs['critic_units'] = dense_units
self._configs['critic_activations'] = activations
def _build_critic_nn(self):
"""搭建 “评估者” 神经网络用于逼近值函数,评价模型生成的动作。
“评估者” 模型输入当前执行的动作和(执行动作后)的环境状态两部分, 输出Q值。
"""
action_input = layers.Input(shape=self._actions_shape) # shape(None, 2)
observation_input = layers.Input(shape=self._obs_shape) # shape(None, 1, 10)
flatten_observation = layers.Flatten()(observation_input) # shape(None, 10)
x = layers.Concatenate()([action_input, flatten_observation]) # shape(None, 12)
units = self._configs['critic_units']
funcs = self._configs['critic_activations']
for units, activation in zip(units, funcs):
x = layers.Dense(units)(x)
x = layers.Activation(activation)(x)
x = layers.Dense(1)(x) # shape(None, 1)
x = layers.Activation('linear')(x)
# input shape[(None, 2), (None, 1, 10)], output shape[(None, 1)]
critic = models.Model(inputs=[action_input, observation_input], outputs=x)
return critic, action_input, observation_input
def set_compile_configs(self, memory_limit: int, window_len: int,
actor_warmup_steps: int, critic_warmup_steps: int,
gamma: float, init_lr: float, target_lr: float):
"""设置ddpg引擎参数。
Args
----
+ memory_limit(int): 动作回放池容量;
+ window_len(int): 动作回放池滑动更新窗口大小(步数);
+ actor_warmup_steps(int): 行动者模型预热期长度(预热之后开始模型更新);
+ critic_warmup_steps(int): 评估者模型预热期长度(预热后开始模型更新);
+ gamma(float): 贝尔曼方程(奖励)折扣系数;
+ init_lr(float): 模型的初始更新步长(学习率);
+ target_lr(float): 目标(辅助)模型的更新步长(学习率,通常低于`lr`);
"""
self._configs['memory_limit'] = max(int(memory_limit), 1)
self._configs['window_len'] = max(int(window_len), 1)
if self._configs['memory_limit'] < self._configs['window_len']:
raise IOError(
f"Compile arg: memory_limit({memory_limit}) < window_len({window_len})."
)
self._configs['actor_warmup'] = max(int(actor_warmup_steps), 0)
self._configs['critic_warmup'] = max(int(critic_warmup_steps), 0)
self._configs['ddpg_gamma'] = min(abs(gamma), 1.0)
self._configs['ddpg_lr'] = max(abs(init_lr), 1.e-3)
self._configs['ddpg_target_lr'] = abs(target_lr)
def _build_ddpg_agent(self, actor, critic, action_input):
"""配置 DDPG 算法模型。
"""
# 训练过程中动作、状态、奖励等记录器(按指定大小存储)。
memory = SequentialMemory(limit=self._configs['memory_limit'],
window_length=self._configs['window_len'])
# 训练过程中按概率采取随机动作(加入随机噪声),探索更好的动作。
# 随机噪音可能导致agent获得的actions超出action_space。
# 但是在DdpgEnv内以及pyswmm内会对action做检查。
random_process = OrnsteinUhlenbeckProcess(size=self._nb_actions,
theta=0.15,
mu=0.0,
sigma=0.1)
# DDPG 算法引擎。
# DDPG 引擎配置了行动者和评估者网络,二者配合提高模型的表现;同时,
# DDPG 引擎配置了模型副本,但使用不同的更新步长,用于稳定训练过程;
# DDPG 引擎配置了回放池、随机噪音,用于探索可能的更优的动作。
agent = DDPGAgent(nb_actions=self._nb_actions,
actor=actor,
critic=critic,
critic_action_input=action_input,
memory=memory,
nb_steps_warmup_actor=self._configs['actor_warmup'],
nb_steps_warmup_critic=self._configs['critic_warmup'],
random_process=random_process,
gamma=self._configs['ddpg_gamma'],
target_model_update=self._configs['ddpg_target_lr'])
return agent, memory
def compile(self, metrics=['mae']):
actor = self._build_actor_nn()
critic, action_input, _ = self._build_critic_nn()
agent, memory = self._build_ddpg_agent(actor, critic, action_input)
opt = optimizers.Adam(learning_rate=self._configs['ddpg_lr'], clipnorm=1.0)
agent.compile(opt, metrics)
self._agent = agent
self._memory = memory
def get_config(self):
return self._configs
def forward(self, observation: object):
return self._agent.forward(observation)
def backward(self, reward: float, terminal: bool):
return self._agent.backward(reward, terminal)
def fit(self,
env: Env,
nb_steps: int,
action_repetition: int = 1,
callbacks: list = None,
verbose: int = 1,
visualize: bool = False,
nb_max_start_steps: int = 0,
start_step_policy: Any = None,
log_interval: int = 10000,
nb_max_episode_steps: int = None):
history = self._agent.fit(env, nb_steps, action_repetition, callbacks, verbose,
visualize, nb_max_start_steps, start_step_policy,
log_interval, nb_max_episode_steps)
return history
def test(self,
env: Env,
nb_episodes: int = 1,
action_repetition: int = 1,
callbacks: list = None,
visualize: bool = False,
nb_max_episode_steps: int = None,
nb_max_start_steps: int = 0,
start_step_policy: Any = None,
verbose: int = 1):
self._agent.test(env, nb_episodes, action_repetition, callbacks, visualize,
nb_max_episode_steps, nb_max_start_steps, start_step_policy,
verbose)
def reset_states(self):
self._agent.reset_states()
def save_weights(self, filepath: str, overwrite: bool = False):
self._agent.save_weights(filepath, overwrite)
def load_weights(self, filepath: str):
self._agent.load_weights(filepath)
def extract_result(self):
"""提取强化学习结果。
"""
# 提取强化学习日志。
ctrl_steps = self._env._total_steps
all_actions = np.array(list(self._memory.actions)[0:ctrl_steps])
all_rewards = np.array(list(self._memory.rewards)[0:ctrl_steps])
all_states = np.array(list(self._memory.observations)[0:ctrl_steps])
# 提取水池水深。
ponds_id, juncs_id = self._env.get_ctrl_nodes_id()
num_ponds = len(ponds_id)
num_juncs = len(juncs_id)
all_ponds_depth = all_states[:, :num_ponds]
# 提取总溢流体积。
all_flooding = all_states[:, num_ponds:2 * num_ponds + num_juncs]
# 统计每回合的平均奖励。
avg_rewards = []
num_episodes = int(self._memory.nb_entries / ctrl_steps)
for i in range(num_episodes):
temp_rwd = all_rewards[ctrl_steps * i:ctrl_steps * (i + 1)]
avg_rewards.append(np.mean(temp_rwd))
return all_rewards, all_actions, all_ponds_depth, all_flooding, avg_rewards | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/controllers/PySwmmDdpgRTC/DdpgAgent.py | DdpgAgent.py |
import numpy as np
from copy import deepcopy
import os
import sys
sys.path.append(os.path.abspath(r"."))
from FloodMaster.controllers.controller import ABCController
class DdpgRtc(ABCController):
"""采用 DDPG 深度强化算法、基于 PySwmm 模型的闸门联调实现管网水力系统实时控制(RTC).
"""
def __init__(self, ID: str):
"""实时控制器初始化。
"""
self._id = ID
self._configs = {}
self._env = None
self._agent = None
self._processor = None
self._rewards = []
self._actions = []
self._observations = []
def init(self, env: object, agent: object, processor: object, confs: dict):
self._rewards = []
self._actions = []
self._observations = []
# 环境配置
self._env = env
self._configs['env'] = self._env.configs
# 引擎配置
self._agent = agent
self._configs['agent'] = self._agent.get_config()
# 适配器
if processor:
self._processor = processor
# 其他配置
self._configs['env_path'] = confs['env_path']
self._configs['agent_path'] = confs['agent_path']
def fit(self, confs: dict):
history = self._agent.fit(self._env,
nb_steps=confs['nb_steps'],
verbose=confs['verbose'],
action_repetition=confs['action_repetition'],
log_interval=confs['log_interval'])
self._configs.update(confs)
self._agent.save_weights(self._configs['agent_path'], True)
return history
def run(self):
done = False
history = {'actions': [], 'rewards': [], 'observations': [], 'metrics': []}
# 引擎和环境的初始化
self.reset(None)
observation = deepcopy(self._env.states) # 重置环境并获取初始状态
if self._processor: # 通过适配器处理环境原始状态为引擎的输入数据
observation = self._processor.process_observation(observation)
assert observation is not None
# 在环境中执行模拟直到环境结束
while (not done):
# 调用引擎生成环境动作
action, observation, metrics, r, done = self.step(observation)
# 记录日志
history['actions'].append(action)
history['observations'].append(observation)
history['metrics'].append(metrics)
history['rewards'].append(r)
# 环境结束后再对引擎执行以此前向预报和反向更新
if done:
self._agent.forward(observation)
self._agent.backward(0., terminal=False)
observation = None
# 提取强化学习过程状态
self._actions = history['actions']
self._observations = history['observations']
self._rewards = history['rewards']
return history['actions']
def step(self, observation):
# 调用引擎生成环境动作
action = self._agent.forward(observation)
if self._processor: # 通过适配器处理引擎动作为环境动作
action = self._processor.process_action(action)
# 在环境中执行动作并获得新的环境状态以及执行动作的奖励等
observation, r, done, info = self._env.step(action)
observation = deepcopy(observation)
if self._processor:
observation, r, done, info = self._processor.process_step(
observation, r, done, info)
# 保持过程状态
reward = self._rewards[-1] if len(self._rewards) > 0 else np.float32(0)
reward += r
self._rewards.append(reward)
self._actions.append(action)
self._observations.append(observation)
# 更新引擎
metrics = self._agent.backward(reward, terminal=done)
return action, observation, metrics, r, done
def reset(self, configs):
self._agent.reset_states()
self._env.reset()
self._rewards.clear()
self._actions.clear()
self._observations.clear()
@property
def configs(self):
return self._configs
@property
def actions(self):
return self._actions
@property
def observations(self):
return self._observations
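# Hedged sketch (not part of the original module): a minimal processor that could be
# passed to DdpgRtc.init() to clip raw DDPG actions into the [0, 1] orifice-opening
# range expected by the SWMM environment. Only the hooks DdpgRtc actually calls are
# implemented; everything here is an illustrative assumption, not the project's API.
class _ClipActionProcessor(object):
    def process_observation(self, observation):
        # pass environment states through unchanged
        return observation
    def process_action(self, action):
        # keep relative gate openings inside the valid range
        return np.clip(action, 0.0, 1.0)
    def process_step(self, observation, reward, done, info):
        return observation, reward, done, info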
if __name__ == "__main__":
import matplotlib.pyplot as plt
import pandas as pd
from DdpgEnv import PySwmmEnv
from DdpgAgent import DdpgAgent
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# 配置环境
confs = {}
proj_dir = r".\data\ddpg-swmm-data\\"
mid_dir1 = r"obs_data_1month_all_controlled\\"
mid_dir2 = r"obs_data_daily_fcsts\\"
swmm_inp = proj_dir + mid_dir1 + r"test_01012018_01312018.inp"
fcst_inp = proj_dir + mid_dir2 + r"test_01012018_01312018.csv"
ctrl_step = 900
ctrl_ponds = ["St1", "St2"]
ponds_max_depth = [4.61, 4.61]
ponds_target = [2.0, 2.0]
ctrl_orifices = ["R1", "R2"]
ctrl_juncs = ["J1"]
swmm_env = PySwmmEnv()
swmm_env.init(swmm_inp, fcst_inp, ctrl_step, ctrl_ponds, ponds_max_depth,
ponds_target, ctrl_orifices, ctrl_juncs)
# 强化学习引擎
agent_Id = "ddpg-1"
agent = DdpgAgent(agent_Id)
action_space_shape = swmm_env.action_space.shape
obs_space_shape = swmm_env.observation_space.shape
agent.init(action_space_shape, obs_space_shape)
dense_units = [16, 16, 8]
activations = ['relu', 'relu', 'relu']
agent.set_actor_configs(dense_units, activations)
dense_units = [32, 32, 32]
activations = ['relu', 'relu', 'relu']
agent.set_critic_configs(dense_units, activations)
memory_limit = 1000000
window_len = 1
actor_warmup_steps = 50
critic_warmup_steps = 50
gamma = 0.99
init_lr = 0.001
target_lr = 0.001
agent.set_compile_configs(memory_limit, window_len, actor_warmup_steps,
critic_warmup_steps, gamma, init_lr, target_lr)
agent.compile()
# 组装控制器
rtc_Id = "rtc-1"
rtc = DdpgRtc(rtc_Id)
confs["env_path"] = "./tests/models/ddpg_env/"
confs["agent_path"] = "./tests/models/ddpg_agent/"
rtc.init(swmm_env, agent, None, confs)
# 强化学习训练
print("\n==== fit process ====\n")
rtc_confs = {}
rtc_confs["nb_steps"] = 6000
rtc_confs['verbose'] = 1
rtc_confs['action_repetition'] = 1
rtc_confs['log_interval'] = 1000
rtc.fit(rtc_confs)
# 强化学习调度
print("\n==== run ====\n")
actions0 = rtc.run()
# 强化学习步进调度
print("\n==== run by step ====\n")
rtc.reset(None)
done = False
observation = deepcopy(swmm_env.states)
while (not done):
action, observation, metrics, r, done = rtc.step(observation)
# 测试并提取结果
def extract_result(env, agent):
"""提取强化学习结果。
"""
# 提取强化学习日志。
ctrl_steps = env._total_steps
all_actions = np.array(list(agent._memory.actions)[0:ctrl_steps])
all_rewards = np.array(list(agent._memory.rewards)[0:ctrl_steps])
all_states = np.array(list(agent._memory.observations)[0:ctrl_steps])
# 提取水池水深。
ponds_id, juncs_id = env.get_ctrl_nodes_id()
num_ponds = len(ponds_id)
num_juncs = len(juncs_id)
all_ponds_depth = all_states[:, :num_ponds]
# 提取总溢流体积。
all_flooding = all_states[:, num_ponds:2 * num_ponds + num_juncs]
# 统计每回合的平均奖励。
avg_rewards = []
num_episodes = int(agent._memory.nb_entries / ctrl_steps)
for i in range(num_episodes):
temp_rwd = all_rewards[ctrl_steps * i:ctrl_steps * (i + 1)]
avg_rewards.append(np.mean(temp_rwd))
return all_rewards, all_actions, all_ponds_depth, all_flooding, avg_rewards
rewards, actions, depths, flooding, avg_rewards = extract_result(swmm_env, agent)
# total_flood = agent._extract_flooding_volume()
actions = np.maximum(actions, 0.0) # env类生效的动作(孔口相对开度为0~1)
actions = np.minimum(actions, 1.0)
rainfalls = pd.read_csv(fcst_inp)
total_rainfalls = rainfalls['rain1_total'] + rainfalls['rain2_total']
# 展示结果
plt.subplot(5, 1, 1)
depth_plot = plt.plot(depths)
plt.ylim(0, 6)
plt.title('depths')
plt.ylabel('ft')
plt.subplot(5, 1, 2)
plt.plot(total_rainfalls, color='b')
plt.title('total_rainfalls')
plt.ylabel('rainfall')
plt.xlabel('time step')
plt.subplot(5, 1, 3)
act_plot = plt.plot(actions[:, 0], '-', actions[:, 1], ':')
plt.ylim(0, 1.05)
plt.title('Policy')
plt.ylabel('Valve Position')
plt.xlabel('time step')
first_legend = plt.legend(act_plot, ctrl_orifices)
plt.subplot(5, 1, 4)
plt.plot(flooding, label=ctrl_ponds + ctrl_juncs)
plt.ylim(0)
plt.title('Flooding')
plt.ylabel('CFS')
# flood_str = "Total Vol. = " + str(round(total_flood, 3)) + "MG"
# _, top = plt.gca().get_ylim()
# flood_max = top * 0.85
# plt.text(0, flood_max, flood_str)
plt.subplot(5, 1, 5)
plt.plot(rewards, color='k')
plt.title('Rewards')
plt.ylabel('reward')
plt.xlabel('time step')
plt.tight_layout()
plt.show() | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/controllers/PySwmmDdpgRTC/DdpgController.py | DdpgController.py |
import numpy as np
from pyswmm import Simulation, Nodes, Links
from rl.core import Env # 非本地版本
from gym import spaces # 非本地版本
class PySwmmEnv(Env):
"""基于 Env 接口的 pyswmm 强化学习环境。
该环境通过加载swmm输入文件初始化pyswmm模型, 并暴露指定的控制对象, 包括水池、孔口和节点。
该环境同时限定了实时控制的方式, 即通过调控孔口对象来维持水池目标水深和减少节点溢流。
"""
def __init__(self):
self._input_file = None # SWMM input file
self._fcst_file = None # rainfall/tide time-series file
self._ctrl_step = None # control time step
self._ponds_id = None # IDs of the target/controlled ponds
self._max_depths = {} # maximum depth of each target/controlled pond
self._target_depths = {} # target depth of each target/controlled pond
self._orifices_id = None # IDs of the target/controlled orifices
self._juncs_id = None # IDs of the target/controlled junctions
self._rainfall_fcst = None # rainfall/tide forecast data
self._swmm = None # simulation object
self._total_steps = None # total number of control steps
self._curr_step = None # current control step
self._ponds = None # target/controlled pond objects
self._juncs = None # target/controlled junction objects
self._orifices = None # target/controlled orifice objects
self._states = None # reinforcement-learning environment states
self.action_space = None # reinforcement-learning action space
self.observation_space = None # reinforcement-learning observation space
def init(self, inp_file: str, fcst_file: str, ctrl_step: int, ctrl_ponds: list,
ponds_max_depth: list, ponds_target: list, ctrl_orifices: list,
ctrl_junctions: list):
"""初始化pyswmm模型控制环境。
为了能够更加自由的组装强化学习控制器,将初始化方法独立出来。
Args
----
+ inp_file(str): swmm 项目配置文件;
+ fcst_file(str): 降雨/潮位预报文件;
+ ctrl_step(int): 受控闸门调度时间步长(seconds);
+ ctrl_ponds(list of str): 受控水池IDs;
+ ponds_max_depth(list of float): 各受控水池的最大水深;
+ ponds_target(list of float): 各受控水池的目标水深;
+ ctrl_orifices(list of str): 受控孔口/闸门IDs;
+ ctrl_junctions(list of str): 受控节点IDs;
"""
self.reset()
self._input_file = inp_file
self._fcst_file = fcst_file
self._ctrl_step = ctrl_step
self._ponds_id = ctrl_ponds
self._max_depths = dict(zip(ctrl_ponds, ponds_max_depth))
self._target_depths = dict(zip(ctrl_ponds, ponds_target))
self._orifices_id = ctrl_orifices
self._juncs_id = ctrl_junctions
# 加载降雨/潮位预报数据。
self._rainfall_fcst = self._load_forecast_file()
# 初始化pyswmm模型, 获取模型对象以及总调度步数、当前调度步。
self._swmm, self._total_steps, self._curr_step = self._init_simulation()
# 获取目标/受控对象。
self._ponds, self._juncs, self._orifices = self._get_target_objects()
# 定义强化学习环境状态。
self._states = self._get_env_states()
# 定义强化学习动作空间。
self.action_space = self._get_action_space()
# 定义强化学习观测空间。
self.observation_space = self._get_observation_space()
def _load_forecast_file(self):
"""读入csv格式的降雨/潮位预报文件。
"""
fcst_data = np.genfromtxt(self._fcst_file, delimiter=',')
return fcst_data
def _init_simulation(self):
"""初始化swmm模拟引擎。
"""
simu = Simulation(self._input_file)
simu.step_advance(self._ctrl_step) # 设置模型步进步长(此处即模型调度步长)
simu.start()
# 获取调度控制步长信息。
simu_len = simu.end_time - simu.start_time
total_ctrl_steps = int(simu_len.total_seconds() / self._ctrl_step) # 总实时调度步数
curr_ctrl_step = 1 # 当前调度步
return simu, total_ctrl_steps, curr_ctrl_step
def _get_target_objects(self):
"""从模拟引擎中提取目标/控制对象。
"""
# 水池对象
node_objs = Nodes(self._swmm)
ponds = {pond: node_objs[pond] for pond in self._ponds_id}
# 汊点对象
juncs = {junc: node_objs[junc] for junc in self._juncs_id}
# 水闸对象
link_objs = Links(self._swmm)
orifices = {orifice: link_objs[orifice] for orifice in self._orifices_id}
return ponds, juncs, orifices
def _get_env_states(self):
"""定义环境状态。
[目标水池深度] + [水池的溢流状态] + [目标节点的溢流状态] + [目标水闸的相对开度] + [降雨/潮位预报]
"""
pond_depth_states = [pond.depth for pond in self._ponds.values()] # 目标水池的深度状态
pond_flooding_states = [pond.flooding
for pond in self._ponds.values()] # 目标水池的溢流状态
junc_flooding_states = [junc.flooding
for junc in self._juncs.values()] # 目标节点的溢流状态
orifice_opening_states = [
orifice.current_setting for orifice in self._orifices.values()
] # 目标水闸的相对开度状态
fcst_states = self._rainfall_fcst[self._curr_step].tolist() # 降雨/潮位预报状态
states = pond_depth_states + pond_flooding_states + junc_flooding_states
states = states + orifice_opening_states + fcst_states
states = np.asarray(states)
return states
def _get_action_space(self):
"""定义目标水闸的开度(动作)范围。
"""
low_bounds = [0.0 for _ in self._orifices_id]
high_bounds = [1.0 for _ in self._orifices_id]
action_space = spaces.Box(low=np.array(low_bounds),
high=np.array(high_bounds),
shape=(len(self._orifices_id), ),
dtype=np.double)
return action_space
def _get_observation_space(self):
"""定义观察空间。(维度与状态空间一致)
"""
observation_space = spaces.Box(low=0,
high=1000,
shape=(len(self._states), ),
dtype=np.float32)
return observation_space
def step(self, actions) -> tuple:
"""执行控制动作并按调度步长执行模拟,更新模型,计算回报。
Args
----
+ actions: 各受控孔口的相对开度状态。
Returns
----
返回(当前状态, 奖励值, 模型是否结束, 其他模型信息)。
"""
# 配置动作
for orifice, action in zip(self._orifices_id, actions):
action = min(max(float(action), 0.0), 1.0)
self._orifices[orifice].target_setting = action
# Advance the simulation by one control step. Note that step_advance() only sets the
# advance interval (already configured in _init_simulation()); iterating the Simulation
# object is what actually moves the model forward.
self._swmm.__next__()
# 获取当前系统状态。
self._states = self._get_env_states()
# 计算奖励。
pond_num = len(self._ponds_id)
junc_num = len(self._juncs_id)
if np.sum(self._rainfall_fcst[self._curr_step, :-1]) > 0.: # 检查预报降雨状态
# 降雨期以预防洪水为控制目标。
pond_flooding = sum(self._states[pond_num:2 * pond_num])
junc_flooding = sum(self._states[2 * pond_num:2 * pond_num + junc_num])
reward = -(pond_flooding + junc_flooding)
else:
# 干旱期以预防节点溢流和保持蓄水池水位为控制目标。
junc_flooding = sum(self._states[2 * pond_num:2 * pond_num + junc_num])
pond_depth = sum([
abs(self._ponds[pond].depth - self._target_depths[pond])
for pond in self._ponds_id
])
reward = -(junc_flooding + pond_depth)
# 判断是否模拟结束。
if self._curr_step < self._total_steps - 1:
done = False
else:
done = True
self._curr_step += 1
info = {} # 其他信息。
return self._states, reward, done, info
def reset(self) -> np.ndarray:
"""重启模型。
"""
if not self._swmm:
return None
self._swmm.close()
self._swmm, self._total_steps, self._curr_step = self._init_simulation()
self._states = self._get_env_states()
return self._states
def close(self):
"""关闭模型。
"""
self._swmm.report()
self._swmm.close()
@property
def states(self):
return self._states
@property
def configs(self):
"""
获取环境配置。
"""
configs = {}
configs['input_file'] = self._input_file
configs['fcst_file'] = self._fcst_file
configs['ctrl_step'] = self._ctrl_step
configs['ponds_id'] = self._ponds_id
configs['ponds_target_depth'] = self._target_depths
configs['orifices_id'] = self._orifices_id
configs['juncs_id'] = self._juncs_id
return configs
def __del__(self):
self.close()
def __str__(self):
return '<{} instance>'.format(type(self).__name__)
def get_swmm_file(self) -> str:
"""获取swmm输入文件。
Returns
----
返回swmm文件全路径名。
"""
return self._input_file
def get_ctrl_nodes_id(self) -> tuple:
"""获取受控水池和节点对象Ids.
Returns
----
返回受控水池和节点对象IDs。
"""
return self._ponds_id, self._juncs_id
def get_ctrl_links_id(self) -> list:
"""获取受控水闸对象Ids.
Returns
----
返回受控水闸对象IDs。
"""
return self._orifices_id | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/controllers/PySwmmDdpgRTC/DdpgEnv.py | DdpgEnv.py |
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
import os
import joblib
import json
class StdScaler():
"""
A data standardization tool based on sklearn's StandardScaler. It works as follows:
first the mean and standard deviation of each feature in the data set are computed, and every feature is centred and scaled independently;
the stored mean and standard deviation are then reused later to scale test sets by the same factors.
Standardization operates column-wise, so a 1-D array (a single value per column) cannot be processed directly;
the workaround is to turn the 1-D array into a 2-D array with reshape(-1, 1).
"""
def __init__(self, ID: str):
"""设置标准缩放器的基本配置。
Args
----
+ ID(str): 定标器ID
"""
self._scaler = StandardScaler()
self._id = ID
self._fitted = False # 标准缩放器是否训练好的标识
def fit_transform(self, train_df: pd.DataFrame) -> np.array:
"""计算并存储数据集各列的均值、标准差,并对数据集执行标准化。
Args
----
+ train_df(pd.DataFrame): 训练数据集;
Returns
----
返回标准化之后的数据集。
"""
self._fitted = False
train_df_scaled = self._scaler.fit_transform(train_df)
self._fitted = True
return train_df_scaled
def fit(self, train_df: pd.DataFrame):
"""计算并存储数据集各列的均值、标准差。
Args
----
+ train_df(pd.DataFrame): 训练数据集;
"""
self._fitted = False
self._scaler.fit(train_df)
self._fitted = True
def partial_fit(self, train_df: pd.DataFrame):
"""计算并存储数据集各列的均值、标准差(可以保留之前训练结果作增量训练)。
Args
----
+ train_df(pd.DataFrame): 训练数据集;
"""
self._scaler.partial_fit(train_df)
self._fitted = True
def transform(self, test_df: pd.DataFrame) -> np.array:
"""以已经训练好的标准缩放器,通过居中和缩放执行标准化。
Args
----
+ test_df(pd.DataFrame): 测试数据集;
Returns
----
返回标准化之后的数据集; 如果没有训练好的缩放器, 则返回None。
"""
if not self._fitted:
print(f"ERROR: StdScaler({self._id}) is not fitted yet.")
return None
test_df_scaled = self._scaler.transform(test_df)
return test_df_scaled
def inverse_transform(self, pred_arr: np.array) -> np.array:
"""以已经训练好的标准缩放器,将数据按比例恢复到以前的大小。
Args
----
+ pred_arr(np.array): 标准化后的数据集;
Returns
----
返回逆标准化后的数据集; 如果没有训练好的缩放器, 则返回None。
"""
if not self._fitted:
print(f"ERROR: StdScaler({self._id}) is not fitted yet.")
return None
pred_arr_anti = self._scaler.inverse_transform(pred_arr)
return pred_arr_anti
def is_fitted(self) -> bool:
"""缩放器是否经过训练。
"""
return self._fitted
def save(self, scaler_file: str, property_file: str):
"""将缩放器保存到本地。
Args
----
+ scaler_file(str): 保存文件名(.pkl文件, 完整路径);
+ property_file(str): 保存缩放器器属性文件名(.json文件, 完整路径);
"""
# 保持缩放器。
scaler_path = os.path.dirname(scaler_file)
if not os.path.exists(scaler_path):
os.makedirs(scaler_path)
joblib.dump(self._scaler, scaler_file)
# 保存缩放器属性。
property_path = os.path.dirname(property_file)
if not os.path.exists(property_path):
os.makedirs(property_path)
with open(property_file, 'w', encoding='utf8') as fo:
json.dump({"fitted": self._fitted}, fo)
def set_scaler(self, scaler: StandardScaler, fitted: bool):
"""直接设置(训练好的)数据缩放器。
Args
----
+ scaler(StandardScaler): 训练好的缩放器;
+ fitted(bool): 缩放器是否是训练过;
"""
self._scaler = scaler
self._fitted = fitted
@staticmethod
def load(ID: str, scaler_file: str, property_file: str):
"""从本地加载到缩放器。
Args
----
+ ID(str): 定标器ID;
+ scaler_file(str): 本地缩放器文件名(.pkl文件, 完整路径);
+ property_file(str): 保存缩放器器属性文件名(.json文件, 完整路径);
"""
with open(property_file, 'r', encoding='utf8') as fi:
encoder_properties = json.load(fi)
fitted = encoder_properties['fitted']
scaler = StdScaler(ID)
scaler.set_scaler(joblib.load(scaler_file), fitted)
return scaler | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/utils/DataScaler.py | DataScaler.py |
import pandas as pd
import numpy as np
import datetime
from typing import Any
class DatasetPreprocessor():
"""数据集前处理工具。
"""
@staticmethod
def convert_to_datetime(df: pd.DataFrame,
date_col: str,
date_fmt: str,
is_sort: bool = False,
inplace: bool = False) -> pd.DataFrame:
"""将数据集中日期列转为datetime.datetime类型。
注意, 该方法中没有对NAN等异常值的检查。如果数据转换失败, 会抛出相应异常。
Args
----
+ df(pd.DataFrame): 数据集;
+ date_col(str): 数据集中日期列ID;
+ date_fmt(str): 数据集中日期格式;
+ is_sort(bool): 是否按日期列排序数据集;
+ inplace(bool): 是否更改原始数据集;
Returns
----
时间戳转换后的数据集。
"""
assert isinstance(df, pd.DataFrame), "requiring a pd.DataFrame as input"
df_new = df if inplace else df.copy()
if df_new.empty:
return df_new
# 提取时间戳。
def str_to_datetime(date_str):
return datetime.datetime.strptime(date_str, date_fmt)
is_converted = True
if date_col in df_new.columns:
if np.nan in df_new[date_col]:
raise IOError("could not convert np.nan to datetime.")
for date in df_new[date_col]:
if isinstance(date, datetime.datetime):
continue
if isinstance(date, str):
is_converted = False
else:
raise IOError(
f"invalid dtype({type(date)}), only convert str to datetime.")
if not is_converted:
df_new[date_col] = list(map(str_to_datetime, df_new[date_col]))
# 按时间戳排序数据集。
if is_sort:
df_new.sort_values(by=date_col, inplace=True)
return df_new
@staticmethod
def check_nan(df: pd.DataFrame, inplace: bool = False) -> pd.DataFrame:
"""检查并替换数据集NaN。
对于数据类型,采用线性插值;对于对象和字符串类型,采用填充插值。
Args
----
+ df(pd.DataFrame): 数据集;
+ inplace(bool): 是否更改原始数据集;
Returns
----
数据检查后的数据集。
"""
assert isinstance(df, pd.DataFrame), "requiring a pd.DataFrame as input"
df_new = df if inplace else df.copy()
if df_new.empty:
return df_new
# 数值双向线性插值(‘linear’无法处理datetime对象)。
df_new.interpolate(method='linear', limit_direction='both', inplace=True)
# 字符串前向插值('pad'无法处理非numeric和datetime类型的索引)。
df_new.interpolate(method='pad', limit_direction='forward', inplace=True)
# 再次检查NaN值并采用向后填充。
df_new.fillna(method='bfill', inplace=True)
# 最后替换依然存在的NAN值(全列均为NAN)。
df_new.fillna(value=0, inplace=True)
return df_new
@staticmethod
def check_datetime_continuous(df: pd.DataFrame, date_col: str, date_fmt: str,
time_step: int) -> pd.DataFrame:
"""检查数据集是否为连续时间序列(并去重), 并向后填充缺失值。
注意, 该方法会将指定的日期列转为datetime.datetime类型并排序。同时, 该方法中
对起始时间没有规定,所以重整后的时间序列均从数据集中的第一个开始计算。
Args
----
+ df(pd.DataFrame): 数据集;
+ date_col(str): 数据集中日期列ID;
+ date_fmt(str): 数据集中日期格式;
+ time_step(int): 数据集重采样时间步长(seconds):
Returns
----
数据重采样后的数据集。
"""
assert isinstance(df, pd.DataFrame), "requiring a pd.DataFrame as input"
df_new = df.copy()
if df_new.empty:
return df_new
# 检查日期列(包括数据类型和排序状态)。
DatasetPreprocessor.convert_to_datetime(df_new, date_col, date_fmt, True, True)
# 统计数值类型的数据列。
num_cols = []
for col in df_new.columns:
if df_new[col].dtype == int or df_new[col].dtype == float:
num_cols.append(col)
# 根据步长,检查时间序列连续性,并采用‘pad’模式填充。
df_refactor = df_new[0:1]
pre_line = df_refactor.iloc[0]
for idx, line in df_new[1:].iterrows():
cur_df = df_new.loc[[idx]]
cur_date = line[date_col]
dt = cur_date - pre_line[date_col]
steps = int(dt.total_seconds() / time_step)
curr_date = pre_line[date_col]
for i in range(steps): # 数值类型采用线性插值填充,其他类型采用向后填充
curr_date += datetime.timedelta(seconds=time_step) # 更新时间戳
new_df = cur_df.copy()
new_df[date_col] = curr_date
alpha = (i + 1) / steps
for col in num_cols:
val = pre_line[col] * (1.0 - alpha) + new_df[col] * alpha
new_df[col] = val
df_refactor = pd.concat([df_refactor[:], new_df], ignore_index=True)
pre_line = cur_df.iloc[0] # 更新前一步时间戳
df_new = df_refactor
return df_new
@staticmethod
def intercept_by_cols(df: pd.DataFrame, index_col: str,
index_selected: Any) -> pd.DataFrame:
"""通过列(一般为ID)筛选数据集。
Args
----
+ df(pd.DataFrame): 数据集;
+ index_col(str): 数据集列ID;
+ index_selected(Any): 数据列中筛选保留的值;
Returns
----
数据筛选后的新数据集。
"""
assert isinstance(df, pd.DataFrame), "requiring a pd.DataFrame as input"
df_new = pd.DataFrame()
if index_col in df.columns:
if index_selected:
df_new = df[(df[index_col] == index_selected)] # df_new 不再是 df 的视图
return df_new
@staticmethod
def intercept_by_datetime(df: pd.DataFrame,
date_col: str,
date_fmt: str,
start_date: str = None,
end_date: str = None) -> pd.DataFrame:
"""根据日期时间筛选数据集。
注意, 该方法会将指定的日期列转为datetime.datetime类型并排序。
Args
----
+ df(pd.DataFrame): 数据集;
+ date_col(str): 数据集中日期列ID;
+ date_fmt(str): 数据集中日期格式;
+ start_date(str): 需要筛选的时间段的起始时间戳(格式须和`date_fmt`一致,下同);
+ end_date(str): 需要筛选的时间段的结束时间戳(不包含);
Returns
----
数据筛选后的新数据集。
"""
assert isinstance(df, pd.DataFrame), "requiring a pd.DataFrame as input"
df_new = df.copy()
# 提取时间戳并排序。
DatasetPreprocessor.convert_to_datetime(df_new, date_col, date_fmt, True, True)
# 按时间截取数据集。
if start_date:
beg_date = datetime.datetime.strptime(start_date, date_fmt)
df_new = df_new[(df_new[date_col] >= beg_date)]
if end_date:
stop_date = datetime.datetime.strptime(end_date, date_fmt)
df_new = df_new[(df_new[date_col] < stop_date)] # 不包含stop_date
return df_new
@staticmethod
def delete_by_cols(df: pd.DataFrame,
cols_del: list,
inplace: bool = False) -> pd.DataFrame:
"""删除数据集中指定列。
Args
----
+ df(pd.DataFrame): 数据集;
+ cols_del(list of str): 待删除的数据列Ids;
+ inplace(bool): 是否更改原始数据集;
Returns
----
数据筛选后的数据集。
"""
assert isinstance(df, pd.DataFrame), "requiring a pd.DataFrame as input"
df_new = df if inplace else df.copy()
if df_new.empty:
return df_new
for col in df.columns:
if col in cols_del:
del df_new[col]
return df_new
@staticmethod
def check_numeric_cols(df: pd.DataFrame, inplace: bool = False) -> pd.DataFrame:
"""将数据集中所有数值类型的列转为数值类型。
注意,整数类型和浮点数类型将统一转为浮点数类型。另外, 要求数据集中不存在 NAN等非法数据。
Args
----
+ df(pd.DataFrame): 数据集;
+ inplace(bool): 是否更改原始数据集;
Returns
----
类型重置后的数据集。
"""
assert isinstance(df, pd.DataFrame), "requiring a pd.DataFrame as input"
df_new = df if inplace else df.copy()
if df_new.empty:
return df_new
for col in df_new.columns:
fst_elem = df_new[col].iloc[0]
if isinstance(fst_elem, str) and _is_num_in_str(fst_elem):
df_new[col] = df_new[col].astype(np.float64)
return df_new
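# Hedged usage sketch (not part of the original module): a typical cleaning pipeline
# for a single-station hourly series. The column names 'STCD'/'TM', the station code
# '10001' and the 3600 s step are illustrative assumptions.
def _example_preprocess(df):
    df = DatasetPreprocessor.intercept_by_cols(df, 'STCD', '10001')
    df = DatasetPreprocessor.check_nan(df)
    df = DatasetPreprocessor.check_datetime_continuous(df, 'TM', '%Y/%m/%d %H:%M:%S', 3600)
    return DatasetPreprocessor.check_numeric_cols(df)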
def _is_num_in_str(str_elem):
"""判断字符串是否为数值类型(包括整数、浮点数、负数)。
"""
if str_elem[0] == '-': # 负数
str_elem = str_elem[1:]
if str_elem.isdigit(): # 整数
return True
str_arr = str_elem.split('.') # 浮点数
if len(str_arr) > 2:
return False
for s in str_arr:
if not s.isdigit():
return False
return True
if __name__ == "__main__":
dic = {
"a": ['a', 'ab', 'abc', np.nan],
'b': [np.nan, 12, np.nan, 22],
'TM': ['2022/1/1', '2022/1/2', '2022/1/4', '2022/1/3']
}
df = pd.DataFrame(dic)
print(id(df))
DatasetPreprocessor.check_nan(df, True)
print(id(df))
res = DatasetPreprocessor.check_datetime_continuous(df, 'TM', "%Y/%m/%d", 3 * 3600)
print(id(res))
res = DatasetPreprocessor.intercept_by_cols(res, 'b', 12.0)
print(id(res))
res = DatasetPreprocessor.intercept_by_datetime(res, 'TM', '%Y/%m/%d %H:%M:%S',
'2022/1/1 3:0:0', '2022/1/1 21:0:0')
print(id(res))
res = DatasetPreprocessor.delete_by_cols(res, ['a'], True)
print(df)
print(id(df))
print(res)
print(id(res)) | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/utils/DataPreprocessor.py | DataPreprocessor.py |
from filterpy.kalman import KalmanFilter as KF
from filterpy import discrete_bayes as DB
import numpy as np
class KalmanFilter():
"""卡尔曼滤波滤波器。
"""
def __init__(self,
dim_x: int = 1,
dim_z: int = 1,
Q: list = None,
R: list = None,
P: list = None,
F: list = None,
H: list = None):
"""初始化卡尔曼滤波器。
Args
----
+ dim_x(int): 状态变量个数;
+ dim_z(int): 观测变量个数;
+ Q(list): 状态噪声;
+ R(list): 测量噪声;
+ P(list): 状态协方差矩阵;
+ F(list): 状态转移矩阵;
+ H(list): 测量矩阵;
"""
# if R and len(R) != dim_z:
# raise RuntimeError("All observations' noise should be given in R.")
# if Q and len(Q) != dim_x:
# raise RuntimeError("All states' noise should be given in Q.")
# filter
self._kf = KF(dim_x, dim_z) # dim_x: size of the hidden state, dim_z: size of the measurement
# define the parameters
self._kf.x = np.zeros(dim_x).reshape(dim_x, 1) # initial state; it converges towards the true value as measurements are assimilated
self._kf.P = np.eye(dim_x) if P is None else P # state covariance matrix; by default the states are independent
self._kf.F = np.eye(dim_x) if F is None else F # state transition matrix; by default the states are independent
default_H = np.zeros([dim_z, dim_x])
default_H[0][0] = 1.0
self._kf.H = default_H if H is None else H # measurement matrix; by default only the first state is measured
self._kf.Q = np.diag([1.] * dim_x) if Q is None else np.diag(Q) # process noise, 1.0 by default
self._kf.R = np.diag([1.] * dim_z) if R is None else np.diag(R) # measurement noise, 1.0 by default
def filter(self, measurements: list) -> list:
"""预测更新。
Args
----
+ measurements(list): 观测值序列;
Returns
----
返回校正结果。
"""
inputs = np.array(measurements).reshape(-1, self._kf.dim_z)
filter_result = list()
for z in inputs:
self._kf.predict()
self._kf.update(z)
filter_result.append(self._kf.x)
return np.squeeze(np.array(filter_result))
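# Hedged usage sketch (not part of the original module): 1-D smoothing with the wrapper
# above. The ratio Q/R is the main tuning knob -- a smaller process noise Q relative to
# the measurement noise R trusts the model more and smooths more aggressively. The
# numbers below are illustrative assumptions.
def _example_smooth(measurements):
    kf = KalmanFilter(dim_x=1, dim_z=1, Q=[1e-4], R=[1e-1])
    return kf.filter(measurements)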
class KalmanFilter2():
"""一维卡尔曼滤波器, 用于滤波实测状态。
"""
def __init__(self, Q: float, R: float):
"""一维简化卡尔曼滤波器。
Args
----
+ Q(float): 预测预估误差;
+ R(float): 测量误差;
"""
self._Q = Q
self._R = R
self._accumulate_err = 1
self._last_pred = 0
def filter(self, obs: float) -> float:
"""Predict-and-update step.
Args
----
+ obs(float): current observation;
Returns
----
The corrected value.
"""
# check whether the difference between the new and previous value is within a reasonable range
if abs(self._last_pred) > 1.e-4 and abs(
(obs - self._last_pred) / self._last_pred) > 0.25:
old_val = obs * 0.382 + self._last_pred * 0.618
else:
old_val = self._last_pred
# total error: sqrt(accumulated_error^2 + process_error^2)
old_err = (self._accumulate_err**2 + self._Q**2)**(1 / 2)
# Kalman gain
H = old_err**2 / (old_err**2 + self._R**2)
# corrected estimate
pred = old_val + H * (obs - old_val)
# update the accumulated error
self._accumulate_err = ((1 - H) * old_err**2)**(1 / 2)
self._last_pred = pred
return pred
class BayesFilter():
"""离散贝叶斯滤波器。
"""
def __init__(self):
"""
"""
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
# KalmanFilter2
array = np.array([50] * 500)
mu, sigma = 0, 3
s = np.random.normal(mu, sigma, 500)
test_array = array + s
plt.plot(test_array)
kf2 = KalmanFilter2(1.e-4, 1.e-1)
adc = []
for i in range(500):
adc.append(kf2.filter(test_array[i]))
plt.plot(adc)
plt.plot(array)
plt.show()
# KalmanFilter
measurements = np.linspace(1, 500, 500)
mu, sigma = 0, 3
noise = np.random.normal(mu, sigma, 500)
z_noise = measurements + noise
plt.plot(z_noise, label="z_noise")
dim_x, dim_z = 2, 1
P = np.array([[1, 0], [0, 1]])
F = np.array([[1, 1], [0, 1]])
Q = np.array([[0.001, 0], [0, 0.0001]])
H = np.array([[1, 0]])
R = np.array([1])
kf = KalmanFilter(dim_x, dim_z, Q, R, P, F, H)
z_corr = [v[0] for v in kf.filter(z_noise)]
plt.plot(z_corr, label="z_corr")
plt.show()
# 1d KalmanFilter
kf3 = KalmanFilter(1, 1, [1.e-4], [1.e-1])
corr = kf3.filter(test_array)
plt.plot(test_array)
plt.plot(array)
plt.plot(corr)
plt.show() | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/utils/DataFilters.py | DataFilters.py |
import category_encoders as ce
import pandas as pd
import joblib
import os
import json
class LooEncoder():
"""采用 LeaveOneOut 方法编码类别变量,从而可进行机器学习。
由于该方法属于有监督方法,要求数据集中标签变量为非类别变量。
同时,该编码方法可以适应不断增加的类别。
此外,该编码方式为不唯一编码、无法逆编码,不适合标签的编码。
"""
def __init__(self, ID: str, features: list = None):
"""初始化编码器的基本配置。
Args
----
+ ID(str): 编码器ID;
+ features(list of str): 待编码的类别变量IDs(默认对所有字符型变量编码);
"""
self._encoder = ce.LeaveOneOutEncoder(cols=features, return_df=True)
self._id = ID
self._fitted = False
def fit_transform(self, features_df: pd.DataFrame,
label_df: pd.DataFrame) -> pd.DataFrame:
"""监督训练编码器,并对指定的类别变量编码。
训练中采用的`features_df`的向量形状(标签数)将在编码器中固定下来,后续
编码`transform()`中输入的`features_df`的标签数必须与此一致。
Args
----
+ features_df(pd.DataFrame): 待编码类别变量数据集(样本数*标签数);
+ label_df(pd.DataFrame): 标签变量数据集(标签变量必须为数值类型);
Returns
----
返回类别编码后的数据集。
"""
features_df_encoded = self._encoder.fit_transform(features_df, label_df)
self._fitted = True
return features_df_encoded
def fit(self, features_df: pd.DataFrame, label_df: pd.DataFrame):
"""监督训练编码器。
训练中采用的`features_df`的向量形状(标签数)将在编码器中固定下来,后续
编码`transform()`中输入的`features_df`的标签数必须与此一致。
Args
----
+ features_df(pd.DataFrame): 待编码类别变量数据集(样本数*标签数);
+ label_df(pd.DataFrame): 标签变量一维数据集(标签变量必须为数值类型);
"""
self._encoder.fit(features_df, label_df)
self._fitted = True
def transform(self,
features_df: pd.DataFrame,
label_df: pd.DataFrame = None) -> pd.DataFrame:
"""通过已经训练好的编码器编码类别变量。
通常对于训练集,需要继续提供标签数据;而对于测试集则不需要。
编码时要求输入的`features_df`的标签数必须与训练时一致。
Args
----
+ features_df(pd.DataFrame): 待编码类别变量数据集(样本数*标签数);
+ label_df(pd.DataFrame): 标签变量一维数据集(标签变量必须为数值类型);
Returns
----
返回编码后的数据集;若编码器未经训练, 则返回`None`。
"""
if not self._fitted:
return None
df_encoded = self._encoder.transform(features_df, label_df)
return df_encoded
@property
def features(self) -> list:
"""编码器编码的特征变量IDs。
"""
return self._encoder.get_feature_names()
def save(self, encoder_file: str):
"""将编码器保存到本地。
Args
----
+ encoder_file(str): 保存文件名(.pkl文件, 完整路径);
"""
encoder_path = os.path.dirname(encoder_file)
if not os.path.exists(encoder_path):
os.makedirs(encoder_path)
joblib.dump(self._encoder, encoder_file)
def set_encoder(self, encoder: ce.LeaveOneOutEncoder):
"""直接设置(训练好的)类别编码器。
Args
----
+ encoder(LeaveOneOutEncoder): 训练好的编码器;
"""
self._encoder = encoder
self._fitted = True
@staticmethod
def load(encoder_file, ID: str):
"""从本地加载到编码器。
Args
----
+ encoder_file(str): 本地编码器文件名(.pkl文件, 完整路径);
+ ID(str): 编码器ID;
"""
encoder = LooEncoder(ID)
encoder.set_encoder(joblib.load(encoder_file))
return encoder
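# Hedged usage sketch (not part of the original module): leave-one-out encoding of a
# categorical column against a numeric target. The data and the encoder ID are
# illustrative assumptions; note that this encoder cannot be inverted.
def _example_loo_encoding():
    X = pd.DataFrame({'Grade': ['High', 'High', 'Medium', 'Low', 'Low']})
    y = pd.DataFrame({'Income': [28300, 25000, 12500, 4500, 4200]})
    encoder = LooEncoder('loo-demo', features=['Grade'])
    X_train = encoder.fit_transform(X, y)  # training-time encoding uses the target
    X_test = encoder.transform(X)          # test-time encoding needs no target
    return X_train, X_test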
class OrdinalEncoder():
"""采用 OrdinalEncoder 方法实现类别变量的编码。
该方法属于无监督方法,所以不需要标签数据,但是支持结合标签数据训练。
该方法支持用户指定类别编码,从而为类别编码提供先验知识。
该方法支持逆编码, 同时可以适应不断增加的类别。
"""
def __init__(self, ID: str, features: list = None, mapping: list = None):
"""初始化编码器的基本配置。
如果指定编码,则需要指定所有`features`中列出的类别变量的自定义编码;同时,
指定每个类别变量编码时,需要指定数据集中所有类别的编码(未指定的均编码-1)。
Args
----
+ ID(str): 编码器ID;
+ features(list of str): 待编码的类别变量IDs(默认对所有字符型变量编码);
+ mapping(list of dict): 自定义编码([{'col':'col_id', 'mapping':{'v1':1, 'v2:2}}]);
"""
self._encoder = ce.OrdinalEncoder(cols=features,
mapping=mapping,
return_df=True)
self._id = ID
self._mapping = mapping
self._features = features
self._fitted = False
def fit_transform(self,
features_df: pd.DataFrame,
label_df: pd.DataFrame = None) -> pd.DataFrame:
"""训练编码器,并对指定的类别变量编码。
训练中采用的`features_df`的向量形状(标签数)将在编码器中固定下来,后续
编码`transform()`中输入的`features_df`的标签数必须与此一致。
标签数据'label_df'为可选配置。
当前接口会重置编码器。
Args
----
+ features_df(pd.DataFrame): 待编码类别变量数据集(样本数*标签数);
+ label_df(pd.DataFrame): 标签变量一维数据集(标签变量必须为数值类型);
Returns
----
返回类别编码后的数据集。
"""
# 训练编码器。
features_df_encoded = self._encoder.fit_transform(features_df, label_df)
# 记录编码变量。
self._record_encoded_features(features_df)
# 记录编码关系。
self._record_mapping(features_df)
self._fitted = True
return features_df_encoded
def _record_encoded_features(self, features_df):
"""记录实际被编码处理的特征。
"""
self._features = []
features_all = self._encoder.get_feature_names()
for f in features_all:
if features_df[f].dtype == object: # 而非str类型
self._features.append(f)
def _record_mapping(self, features_df):
"""记录编码关系。
"""
# 统计类别变量。
tmp_feat_dic = {}
for f in self._features:
categories_f = list(set(features_df[f]))
tmp_feat_dic[f] = categories_f
# 统计数据长度。
size = 0
for Id, val in tmp_feat_dic.items():
size = max(size, len(val))
# 重构数据集。
for Id, val in tmp_feat_dic.items():
if len(val) < size:
dn = size - len(val)
val += [val[-1] for i in range(dn)]
tmp_feat_dic[Id] = val
for Id in self._encoder.get_feature_names():
if Id not in self._features:
dic = [0 for i in range(size)]
tmp_feat_dic[Id] = dic
# 类型编码和记录。
self._mapping = []
res = self._encoder.transform(pd.DataFrame(tmp_feat_dic))
for Id, categories in tmp_feat_dic.items():
res_id = res[Id]
map_id = {None: 0}
for i in range(len(categories)):
map_id[categories[i]] = int(res_id[i]) # 为了后期json保存
dic_id = {}
dic_id['col'] = Id
dic_id['mapping'] = map_id
self._mapping.append(dic_id)
def fit(self, features_df: pd.DataFrame, label_df: pd.DataFrame = None):
"""训练编码器。
训练中采用的`features_df`的向量形状(标签数)将在编码器中固定下来,后续
编码`transform()`中输入的`features_df`的标签数必须与此一致。
标签数据'label_df'为可选配置。
当前接口会重置编码器。
Args
----
+ features_df(pd.DataFrame): 待编码类别变量数据集(样本数*标签数);
+ label_df(pd.DataFrame): 标签变量一维数据集(标签变量必须为数值类型);
"""
# 训练编码器。
self._encoder.fit(features_df, label_df)
# 记录编码变量。
self._record_encoded_features(features_df)
# 记录编码关系。
self._record_mapping(features_df)
self._fitted = True
def transform(self, features_df: pd.DataFrame) -> pd.DataFrame:
"""通过已经训练好的编码器编码类别变量。
通常对于训练集,需要继续提供标签数据;而对于测试集则不需要。
编码时要求输入的`features_df`的标签数必须与训练时一致。
Args
----
+ features_df(pd.DataFrame): 待编码类别变量数据集(样本数*标签数);
Returns
----
返回编码后的数据集;若编码器未经训练, 则返回`None`。
"""
if not self._fitted:
print(f"ERROR: CategoryEncoder({self._id}) is not fitted yet.")
return None
if not self._features:
return features_df
df_encoded = self._encoder.transform(features_df)
return df_encoded
def partial_fit(self, features_df: pd.DataFrame, label_df: pd.DataFrame = None):
"""增量训练编码器。
训练中采用的`features_df`的向量形状(标签数)将在编码器中固定下来,后续
编码`transform()`中输入的`features_df`的标签数必须与此一致。
标签数据'label_df'为可选配置。
Args
----
+ features_df(pd.DataFrame): 待编码类别变量数据集(样本数*标签数);
+ label_df(pd.DataFrame): 标签变量一维数据集(标签变量必须为数值类型);
"""
# 提取原始映射关系并加载新增类别。
if self._fitted and self._features:
for feat in self._features:
idx = self._search_categories(feat)
mapping = self._mapping[idx]['mapping']
categories = mapping.keys()
max_codes = max(mapping.values())
for Id in list(set(features_df[feat])): # 加载新增类别
if Id not in categories:
max_codes += 1
mapping[Id] = max_codes
self._encoder = ce.OrdinalEncoder(cols=self._features,
mapping=self._mapping,
return_df=True)
# 编码器训练和记录。
self._encoder.fit(features_df, label_df)
if not self._fitted: # 保存并防止覆盖训练结果
self._record_encoded_features(features_df)
self._record_mapping(features_df)
self._fitted = True
def _search_categories(self, feature):
"""搜索类别。
"""
if feature not in self._features:
return None
for i in range(len(self._mapping)):
if self._mapping[i]['col'] == feature:
return i
return None
def inverse_transform(self, encoded_features_df: pd.DataFrame) -> pd.DataFrame:
"""将编码结果逆编码为类别。
Args
----
+ encoded_features_df(pd.DataFrame): 编码后的特征数据集;
Returns
----
返回逆编码后的数据集;若编码器未经训练, 则返回`None`.
"""
if not self._fitted:
print(f"ERROR: CategoryEncoder({self._id}) is not fitted yet.")
if not self._features:
return encoded_features_df
# 逆编码(自定义实现;ce.OrdinalEncoder无法在指定mapping的情况下逆编码)。
# inversed_features = self._encoder.inverse_transform(encoded_features_df)
inversed_features = {}
for feat in encoded_features_df.columns:
if feat not in self._features:
inversed_features[feat] = encoded_features_df[feat]
continue
feat_map = self._mapping[self._search_categories(feat)]['mapping']
inversed_map = dict(zip(feat_map.values(), feat_map.keys()))
inversed_feat = list(
map(lambda x: inversed_map[x], encoded_features_df[feat]))
inversed_features[feat] = inversed_feat
return pd.DataFrame(inversed_features)
@property
def features(self) -> list:
"""编码器编码的特征变量IDs。
"""
return self._features
def save(self, encoder_file: str, property_file: str):
"""将编码器保存到本地。
Args
----
+ encoder_file(str): 保存编码器文件名(.pkl文件, 完整路径);
+ property_file(str): 保存编码器属性文件名(.json文件, 完整路径);
"""
# 保存编码器。
encoder_path = os.path.dirname(encoder_file)
if not os.path.exists(encoder_path):
os.makedirs(encoder_path)
joblib.dump(self._encoder, encoder_file)
# 保存编码器属性。
property_path = os.path.dirname(property_file)
if not os.path.exists(property_path):
os.makedirs(property_path)
with open(property_file, 'w', encoding='utf8') as fo:
json.dump(
{
"mapping": self._mapping,
"features": self._features,
"fitted": self._fitted
}, fo)
def set_encoder(self, encoder: ce.OrdinalEncoder, features: list, mapping: list,
fitted: bool):
"""直接设置(训练好的)类别编码器。
Args
----
+ encoder(LeaveOneOutEncoder): 训练好的编码器;
+ features(list of str): 待编码的类别变量IDs(默认对所有字符型变量编码);
+ mapping(list of dict): 自定义编码;
+ fitted(bool): 是否是训练过的编码器;
"""
self._encoder = encoder
self._features = features
self._mapping = mapping
self._fitted = fitted
@staticmethod
def load(ID: str, encoder_file: str, property_file: str):
"""从本地加载到编码器。
Args
----
+ ID(str): 编码器ID;
+ encoder_file(str): 本地编码器文件名(.pkl文件, 完整路径);
+ property_file(str): 保存编码器属性文件名(.json文件, 完整路径);
"""
with open(property_file, 'r', encoding='utf8') as fi:
encoder_properties = json.load(fi)
features = encoder_properties['features']
mapping = encoder_properties['mapping']
fitted = encoder_properties['fitted']
encoder = OrdinalEncoder(ID)
encoder.set_encoder(joblib.load(encoder_file), features, mapping, fitted)
return encoder
if __name__ == "__main__":
data = pd.DataFrame({
'ID': [1, 2, 3, 4, 5, 6, 7, 8],
'Sex': ['F', 'M', 'M', 'F', 'M', None, 'F', 'M'],
'BloodType': ['A', 'AB', 'O', 'B', None, 'O', 'AB', 'B'],
'Grade': ['High', 'High', 'Medium', 'Low', 'Low', 'Medium', 'Low', 'High'],
'Education': [
'PhD', 'HighSchool', 'Bachelor', 'Master', 'HighSchool', 'Master', 'PhD',
'Bachelor'
],
'Income': [28300, 4500, 7500, 12500, 4200, 15000, 25000, 7200]
})
Income_grand_mean = data['Income'].mean()
data['Income_grand_mean'] = [Income_grand_mean] * len(data)
Income_group = data.groupby('Education')['Income'].mean().rename(
'Income_level_mean').reset_index()
data_new = pd.merge(data, Income_group)
features = list(data_new.columns)
features.remove('Income')
print(data_new)
# Encoder smoke test
features_train = data_new[['Grade']]
features_test = pd.DataFrame({'Grade': ['High', 'High', 'Medium2', 'Low2']})
mapping = [{'col': 'Grade', 'mapping': {'High': 1, 'Low': 2, 'Medium': 3}}]
features = ['Grade']
encoder = OrdinalEncoder('id1')
encoder.partial_fit(features_train)
res = encoder.transform(features_train)
print(res)
res = encoder.inverse_transform(res)
print(res)
encoder.partial_fit(features_test)
res = encoder.transform(features_train)
print(res)
res = encoder.inverse_transform(res)
print(res)
res = encoder.transform(features_test)
print(res)
res = encoder.inverse_transform(res)
print(res)
encoder_f = "./encoder.pkl"
property_f = "./encoder.json"
encoder.save(encoder_f, property_f)
encoder2 = OrdinalEncoder.load('id2', encoder_f, property_f)
res = encoder2.transform(features_test)
print(res)
res = encoder2.inverse_transform(res)
print(res) | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/utils/CategoryEncoder.py | CategoryEncoder.py |
import pandas as pd
import numpy as np
from DataPreprocessor import DatasetPreprocessor
class CsvLoader():
"""
CSV 数据集加载工具。时序数据格式要求为 CSV 格式,同时要求:
1. 第一行为表头说明;
2. 空行不影响读取;
3. 数据集特征和标签均位于同一份文件中。
对于多站点多日期数据集,应该指定提取某个站点的数据集。
"""
def __init__(self, data_file: str, features: list, labels: list, **kwargs):
"""
Args
----
+ data_file(str): 数据集csv文件;
+ features(list of str): 数据集特征;
+ labels(list of str): 数据集标签;
+ kwargs: 可选参数,包括:
+ date_col(str): 时间戳列的列名(默认为'Date');
+ time_step(int): 数据的时间步长(seconds, 默认3600);
+ date_fmt(str): 日期数据输入格式(默认'%Y/%m/%d');
+ start_date(str): 截取数据集的起始日期(时间格式和date_fmt相同);
+ end_date(str): 截取数据集的起始日期(时间格式和date_fmt相同);
+ index_col(str): 数据集中的索引列(如站点数据集中'STCD');
+ index_selected(str): 数据集中截取的索引ID(如站点'10001');
"""
self._features = features
self._labels = labels
self._data_file = data_file
self._opt_confs = self._check_optional_args(kwargs)
if "index_col" in self._opt_confs.keys() and self._opt_confs['index_col']:
index = self._opt_confs['index_col']
self._df = pd.read_csv(self._data_file, dtype={index: str})
else:
self._df = pd.read_csv(self._data_file)
self._check_ids()
self._intercept_dataset_by_index()
self._check_nan()
self._intercept_dataset_by_datetime()
self._check_continuous()
self._intercept_dataset_by_columns()
def _check_optional_args(self, kwargs):
"""解析数据集加载的可选配置参数。
"""
args_keys = kwargs.keys()
confs = {}
if 'date_col' in args_keys:
confs['date_col'] = kwargs['date_col']
else:
confs['date_col'] = 'Date'
if 'time_step' in args_keys:
confs['time_step'] = int(kwargs['time_step'])
else:
confs['time_step'] = 3600
if 'date_fmt' in args_keys:
confs['date_fmt'] = kwargs['date_fmt']
else:
confs['date_fmt'] = "%Y/%m/%d"
if 'start_date' in args_keys:
confs['start_date'] = kwargs['start_date']
if 'end_date' in args_keys:
confs['end_date'] = kwargs['end_date']
if 'index_col' in args_keys:
confs['index_col'] = kwargs['index_col']
if 'index_selected' in args_keys:
confs['index_selected'] = kwargs['index_selected']
return confs
def _check_ids(self):
"""检查数据集特征和标签是否匹配。
"""
df_data_ids = self._df.columns.to_list()
miss_feats = [fid for fid in self._features if fid not in df_data_ids]
miss_labels = [lid for lid in self._labels if lid not in df_data_ids]
if miss_feats:
raise ValueError(f"Dataset({self._data_file}) miss features: {miss_feats}")
if miss_labels:
raise ValueError(f"Dataset({self._data_file}) miss labels: {miss_labels}")
def _intercept_dataset_by_index(self):
"""根据用户配置的索引截取数据集。
"""
# 读取索引列。
if 'index_col' in self._opt_confs.keys():
index = self._opt_confs['index_col']
else:
index = None
if 'index_selected' in self._opt_confs.keys():
index_selected = self._opt_confs['index_selected']
else:
index_selected = None
if index and index_selected:
self._df = DatasetPreprocessor.intercept_by_cols(self._df, index,
index_selected)
def _intercept_dataset_by_datetime(self):
"""根据用户配置的日期范围截取数据集。
"""
# 提取时间戳并排序。
date_idx = self._opt_confs['date_col']
if date_idx not in self._df.columns:
return
# 按时间截取数据集。
if 'start_date' in self._opt_confs.keys():
start_date = self._opt_confs['start_date']
else:
start_date = None
if 'end_date' in self._opt_confs.keys():
end_date = self._opt_confs['end_date']
else:
end_date = None
if start_date or end_date:
self._df = DatasetPreprocessor.intercept_by_datetime(
self._df, date_idx, self._opt_confs['date_fmt'], start_date, end_date)
def _intercept_dataset_by_columns(self):
"""删除非特征或标签数据列(注意index列和datetime列)。
"""
# 统计无效数据列。
del_cols = []
for col in self._df.columns:
if col not in self._features and col not in self._labels:
del_cols.append(col)
self._df = DatasetPreprocessor.delete_by_cols(self._df, del_cols)
self._df.reset_index()
def _check_nan(self):
"""检查数据集中是否存在NaN, 并采用线性插值。
"""
self._df = DatasetPreprocessor.check_nan(self._df)
def _check_continuous(self):
"""检查数据集是否为连续时间序列(并去重), 并向后填充缺失值。
"""
time_step = self._opt_confs['time_step']
date_idx = self._opt_confs['date_col']
date_fmt = self._opt_confs['date_fmt']
self._df = DatasetPreprocessor.check_datetime_continuous(
self._df, date_idx, date_fmt, time_step)
self._df.set_index(date_idx, inplace=True)
def get_features_dataset(self) -> pd.DataFrame:
"""特征数据集。
"""
return self._df[self._features]
def get_labels_dataset(self) -> pd.DataFrame:
"""标签数据集。
"""
return self._df[self._labels]
def get_dataset(self) -> pd.DataFrame:
"""返回包含特征和标签的数据集。
"""
return self._df
def get_loader_confs(self) -> dict:
"""获取CSV数据加载器的配置。
"""
return self._opt_confs
@property
def features(self) -> list:
"""数据集特征ids。
"""
return self._features
@property
def labels(self) -> list:
"""数据集标签ids。
"""
return self._labels
@staticmethod
def train_test_split(df: pd.DataFrame,
features: list,
labels: list,
test_ratio: float,
is_random: bool,
random_seed: int = None) -> tuple:
"""将数据集拆分为训练集和测试集。
Args
----
+ df(pd.DataFrame): 包含特征和标签的数据集;
+ features(list): 数据集特征IDs;
+ labels(list): 数据集标签IDs;
+ test_ratio(float): 拆分后测试集的占比;
+ isRandom(bool): 是否采用随机采样拆分数据集;
+ random_seed(int): 随机种子(相同的随机种子保证每次抽样结果相同);
Returns
----
返回拆分后的训练集和测试集(
(train_df_features, train_df_labels),
(test_df_features, test_df_labels))。
"""
test_df, train_df = None, None
# Validate inputs
if test_ratio < 0.0 or test_ratio > 1.0:
raise ValueError(f'CsvLoader has invalid test_ratio: {test_ratio}.')
# Split the data set
if not is_random:
test_size = round(df.shape[0] * test_ratio)
test_size = max(test_size, 1)
train_df = df[:-test_size]
test_df = df[-test_size:]
else:
if random_seed:
np.random.seed(random_seed)
test_df = df.sample(frac=test_ratio)
train_df = df.drop(test_df.index)  # keep train and test disjoint
# Separate feature data and label data
train_features = train_df[features]
train_labels = train_df[labels]
test_features = test_df[features]
test_labels = test_df[labels]
return ((train_features, train_labels), (test_features, test_labels)) | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/utils/DataLoader.py | DataLoader.py |
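# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how CsvLoader and train_test_split are meant to be combined.
# The file name and column names below are hypothetical placeholders; replace them
# with a real data set.
#
#   loader = CsvLoader("station_10001.csv",
#                      features=["rain", "evaporation"],
#                      labels=["discharge"],
#                      date_col="Date", date_fmt="%Y/%m/%d", time_step=86400)
#   df = loader.get_dataset()
#   train, test = CsvLoader.train_test_split(df, loader.features, loader.labels,
#                                            test_ratio=0.2, is_random=False)
#   (train_x, train_y), (test_x, test_y) = train, test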
from abc import abstractmethod, ABCMeta
from typing import Any
import numpy as np
class ABCPredictor(metaclass=ABCMeta):
"""
时序数据预报模型的统一接口类。几种基本的调用方式:
1. 方式一,
初始化(compile) -> 训练(fit) -> 评估(evaluate) -> 预报(predict) -> 保存(save).
2. 方式二,
加载(load) -> 预报(predict).
3. 方式三,
加载(load) -> 训练(fit) -> 评估(evaluate) -> 保存(save).
"""
@abstractmethod
def compile(self, confs: dict):
"""
初始化模型,配置模型结构。
Args
----
+ confs(dict): 模型配置基本信息, 包括特征IDs、标签IDs、采样步长、预报步长等。
"""
raise NotImplementedError()
@abstractmethod
def fit(self, train_x: np.array, train_y: np.array, confs: dict) -> dict:
"""
加载训练数据集,训练模型。
Args
----
+ train_x(np.array): 原始二维数据集的特征向量(sample_size*feature_size);
+ train_y(np.array): 原始二维数据集的标签向量(sample_size*label_size);
+ confs(dict): 模型训练配置信息,包括训练次数、批次大小,日志等级等。
Returns
----
返回模型训练过程信息, 例如训练集和验证集上各项指标的准确率和损失。
"""
raise NotImplementedError()
@abstractmethod
def predict(self, pred_x: np.array) -> np.array:
"""
预报数据标签向量。
Args
----
+ pred_x(np.array): 原始二维数据集的特征向量(sample_size*feature_size);
Returns
----
返回预报的标签向量。
"""
raise NotImplementedError()
@abstractmethod
def evaluate(self, test_x: np.array, test_y: np.array) -> dict:
"""
评估(测试)模型。
Args
----
+ test_x(np.array): 原始二维数据集的特征向量(sample_size*feature_size);
+ test_y(np.array): 原始二维数据集的标签向量(sample_size*label_size);
Returns
----
返回各标签的各项评估结果, 包括 mse/mae 等等。
"""
raise NotImplementedError()
@abstractmethod
def save(self):
"""
保存模型到本地。
"""
raise NotImplementedError()
@abstractmethod
def reset(self, **kwargs):
"""
Reset the model.
"""
raise NotImplementedError()
@staticmethod
def load(ID: str, load_path: str) -> Any:
"""
从本地加载模型。
Args
----
+ ID(str): 模型ID;
+ load_path(str): 模型加载路径;
Returns
----
返回模型对象。
"""
raise NotImplementedError()
@abstractmethod
def summary(self):
"""
输出模型的配置。
"""
raise NotImplementedError()
@property
def features(self):
"""
模型预报的特征列表IDs。
"""
return None
@property
def labels(self):
"""
模型接受的标签列表IDs。
"""
return None | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/forecasters/predictor.py | predictor.py |
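# --- Illustrative example (not part of the original package) ---
# A minimal "persistence" forecaster that satisfies the ABCPredictor contract by
# always predicting the last observed label value. It only shows how a concrete
# subclass plugs into the interface; it is not a real FloodMaster model, and the
# class name is an invented placeholder.
class _NaivePersistencePredictor(ABCPredictor):
    def compile(self, confs: dict):
        self._confs = confs          # keep the raw configuration
        self._last_y = None

    def fit(self, train_x: np.array, train_y: np.array, confs: dict) -> dict:
        self._last_y = train_y[-1]   # remember the most recent label row
        return {"loss": []}

    def predict(self, pred_x: np.array) -> np.array:
        # repeat the last observed label for every requested sample
        return np.tile(self._last_y, (pred_x.shape[0], 1))

    def evaluate(self, test_x: np.array, test_y: np.array) -> dict:
        pred_y = self.predict(test_x)
        return {"mse": float(np.mean((pred_y - test_y) ** 2))}

    def save(self):
        pass

    def reset(self, **kwargs):
        self._last_y = kwargs.get("last_y")

    def summary(self):
        print("naive persistence forecaster")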
from __future__ import annotations
from FloodMaster.forecasters.predictor import ABCPredictor
from FloodMaster.utils import StdScaler, OrdinalEncoder
import os
import json
import numpy as np
import pandas as pd
from tensorflow.keras import models, layers, callbacks
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
from sklearn.metrics import mean_absolute_error, mean_squared_error
class LstmPredictor(ABCPredictor):
"""
基于 tf2 的 LSTM 自定义时序数据预报模型。
基于 tf2 的 lstm 模型支持多次的增量训练,即同模型多次调用 `fit()` 接口。因此,
模型支持在线持续学习。
"""
def __init__(self, ID: str):
"""
初始化模型,定义数据集特征和标签。
Args
----
+ ID(str): 模型ID(要求ID唯一);
"""
self._id = ID
self._model = None
self._history = None
self._confs = {'features': [], 'labels': []}
x_encoder_id, y_encoder_id = self._get_encoder_ids()
self._x_encoder = OrdinalEncoder(x_encoder_id)
self._y_encoder = OrdinalEncoder(y_encoder_id)
x_scaler_id, y_scaler_id = self._get_scaler_ids()
self._x_scaler = StdScaler(x_scaler_id)
self._y_scaler = StdScaler(y_scaler_id)
def _get_encoder_ids(self):
"""获取模型编码器ID。
Returns
----
返回标签和特征编码器ID。
"""
x_encoder_id = f"{self._id}-encoder-feature"
y_encoder_id = f"{self._id}-encoder-label"
return x_encoder_id, y_encoder_id
def _get_scaler_ids(self):
"""获取模型缩放器ID。
Returns
----
返回标签和特征缩放器ID。
"""
x_scaler_id = f"{self._id}-scaler-feature"
y_scaler_id = f"{self._id}-scaler-label"
return x_scaler_id, y_scaler_id
def compile(self, confs: dict):
self._model = models.Sequential()
# Parse the custom network configuration
self._confs.update(self._parse_nn_settings(confs))
# First LSTM layer
n_past = self._confs['n_past'] # time steps: forecast from the previous n_past steps
n_features = len(self.features)
lstm_units = self._confs['lstm_units']
dropout_rate = self._confs['dropout_rate']
self._model.add(
layers.LSTM(
units=lstm_units,
input_shape=(n_past, n_features),
return_sequences=True,
# stateful=True,
# batch_input_shape=(32, n_past, n_features),
activation=self._confs['lstm_activation']))
self._model.add(layers.Dropout(rate=dropout_rate))
# Intermediate LSTM layers
layer_depth = self._confs['lstm_depth']
for i in range(1, layer_depth - 1):
self._model.add(layers.LSTM(units=lstm_units, return_sequences=True))
self._model.add(layers.Dropout(rate=dropout_rate))
# Last LSTM layer
self._model.add(layers.LSTM(units=lstm_units, return_sequences=False))
self._model.add(layers.Dropout(rate=dropout_rate))
# Output Dense layer
n_labels = len(self.labels)
n_forecast = self._confs['n_forecast'] # forecast steps: predict the next n_forecast steps
dense_units = n_labels * n_forecast # flatten the multi-step output into one Dense layer
self._model.add(layers.Dense(units=dense_units))
# Compile the network
self._model.compile(loss=self._confs['loss'],
optimizer=self._confs['optimizer'],
metrics=self._confs['metrics'])
def _parse_nn_settings(self, confs):
"""解析神经网络结构自定义配置。
"""
confs_nn = {}
confs_keys = confs.keys()
# Required parameters
if not isinstance(confs['features'], list): # feature IDs
confs_nn['features'] = []
else:
confs_nn['features'] = confs['features']
if not isinstance(confs['labels'], list): # label IDs
confs_nn['labels'] = []
else:
confs_nn['labels'] = confs['labels']
# Parameters with defaults (optional)
if 'n_past' in confs_keys and self.is_float_integer(confs['n_past']):
confs_nn['n_past'] = int(confs['n_past']) # sliding-window size
else:
confs_nn['n_past'] = 1
if 'n_forecast' in confs_keys and self.is_float_integer(confs['n_forecast']):
confs_nn['n_forecast'] = int(confs['n_forecast']) # forecast-window size
else:
confs_nn['n_forecast'] = 1
if 'save_path' in confs_keys:
confs_nn['save_path'] = str(confs['save_path']) # path for saving training results
else:
confs_nn['save_path'] = os.getcwd()
if 'lstm_units' in confs_keys and self.is_float_integer(confs['lstm_units']):
confs_nn['lstm_units'] = int(confs['lstm_units']) # number of units in each LSTM layer
else:
confs_nn['lstm_units'] = 50
if 'lstm_activation' in confs_keys and isinstance(confs['lstm_activation'],
str):
confs_nn['lstm_activation'] = confs['lstm_activation']
else:
confs_nn['lstm_activation'] = 'tanh' # activation function of the LSTM layers
if 'lstm_depth' in confs_keys and self.is_float_integer(confs['lstm_depth']):
confs_nn['lstm_depth'] = int(confs['lstm_depth'])
else:
confs_nn['lstm_depth'] = 2 # depth of the LSTM stack (all layers use the same units)
if 'dropout_rate' in confs_keys and self.is_float_integer(
confs['dropout_rate']):
confs_nn['dropout_rate'] = confs['dropout_rate']
else:
confs_nn['dropout_rate'] = 0.2 # dropout rate of the dropout layers
if 'loss' in confs_keys and isinstance(confs['loss'], str):
confs_nn['loss'] = confs['loss'] # loss function
else:
confs_nn['loss'] = 'mse'
if 'optimizer' in confs_keys and isinstance(confs['optimizer'], str):
confs_nn['optimizer'] = confs['optimizer']
else:
confs_nn['optimizer'] = 'adam' # training optimizer; controls gradient clipping
if 'metrics' in confs_keys and isinstance(confs['metrics'], list):
confs_nn['metrics'] = confs['metrics']
else:
confs_nn['metrics'] = ['accuracy'] # training metrics; not used for weight updates
return confs_nn
@staticmethod
def is_float_integer(var) -> bool:
"""检查变量是否是一个整数或浮点数。
"""
isInteger = np.issubdtype(type(var), np.integer)
isFloat = np.issubdtype(type(var), np.floating)
return isInteger or isFloat
def fit(self, train_x: np.array, train_y: np.array, confs: dict):
# Check the input argument types.
if not isinstance(train_x, np.ndarray) or not isinstance(train_y, np.ndarray):
raise IOError("LstmPredictor.fit() requires `numpy.ndarray`")
# Make sure the model has been compiled.
if not self._model:
raise RuntimeError(f"LstmPredictor({self._id}): not initialized.")
# Parse the fitting configuration.
self._confs.update(self._parse_fitting_settings(confs))
# Standardize the data and build training samples matching the forecaster configuration.
x_samples, y_samples = self._create_train_samples(train_x, train_y)
# Configure early stopping to avoid overfitting.
earlystop = callbacks.EarlyStopping(monitor='val_accuracy',
min_delta=0.0001,
patience=2)
# Run the training
self._history = self._model.fit(
x_samples,
y_samples,
# shuffle=False,
epochs=self._confs['epochs'],
batch_size=self._confs['batch_size'],
validation_split=self._confs['validation_split'],
callbacks=[earlystop],
verbose=self._confs['verbose'])
return self._history
def get_fit_history(self) -> dict:
"""获取模型训练过程中的准确率和损失过程。
该方法返回模型最近一次训练过程中训练集和验证集上的准确率和损失过程。
如果模型未经过训练,则模型返回 `None`。
Returns
----
模型训练过程,包括训练集上的准确率 'accuracy',损失 'loss';
验证集上的准确率 'val_accuracy',损失 'val_loss'。
相应的值是各项指标单值的列表。
"""
if self._history is not None:
return self._history.history
else:
return None
def _parse_fitting_settings(self, confs):
"""解析模型训练自定义配置。
"""
confs_fit = {}
confs_keys = confs.keys() if confs else []
# Parameters with defaults (optional)
if 'epochs' in confs_keys and self.is_float_integer(confs['epochs']):
confs_fit['epochs'] = int(confs['epochs']) # number of training epochs
else:
confs_fit['epochs'] = 10
if 'batch_size' in confs_keys and self.is_float_integer(confs['batch_size']):
confs_fit['batch_size'] = int(confs['batch_size'])
else:
confs_fit['batch_size'] = 32 # training batch size
if 'time_step' in confs_keys and self.is_float_integer(confs['time_step']):
confs_fit['time_step'] = confs['time_step'] # time step of the data set (seconds)
else:
confs_fit['time_step'] = 3600
if 'validation_split' in confs_keys and self.is_float_integer(
confs['validation_split']):
confs_fit['validation_split'] = confs['validation_split']
else:
confs_fit['validation_split'] = 0.2 # fraction of the data split off for validation
if 'verbose' in confs_keys and self.is_float_integer(confs['verbose']):
confs_fit['verbose'] = int(confs['verbose'])
else:
confs_fit['verbose'] = 0 # log level during training (0 = silent)
return confs_fit
def _parse_numpy_to_pandas(self, array, columns):
"""将numpy数组转为pandas数据。
由于 数值数据和字符串数据共存于一个数组中, 可能导致全部为字符串类型。
这会导致数据集编码异常。
同时,这也说明整数类型和浮点数类型可能无法区分。
另外, 要求数据集中不存在 NAN等非法数据, 以及每组数据的类型排列一致。
"""
df = pd.DataFrame(array, columns=columns)
for col in df.columns:
fst_elem = df[col].iloc[0]
if isinstance(fst_elem, str) and self._is_num_in_str(fst_elem):
df[col] = df[col].astype(np.float64)
return df
def _is_num_in_str(self, str_elem):
"""判断字符串是否为数值类型(包括整数、浮点数、负数)。
"""
if str_elem[0] == '-': # 负数
str_elem = str_elem[1:]
if str_elem.isdigit(): # 整数
return True
str_arr = str_elem.split('.') # 浮点数
if len(str_arr) > 2:
return False
for s in str_arr:
if not s.isdigit():
return False
return True
def _create_train_samples(self, train_x, train_y):
"""根据数据集生成训练样本集。
"""
# 重构数据集。
train_x_df = self._parse_numpy_to_pandas(train_x, self.features)
train_y_df = self._parse_numpy_to_pandas(train_y, self.labels)
# Fit the encoders.
self._y_encoder.partial_fit(train_y_df)
self._x_encoder.partial_fit(train_x_df)
train_y_encoded = self._y_encoder.transform(train_y_df)
train_x_encoded = self._x_encoder.transform(train_x_df)
# Fit the scalers.
self._x_scaler.partial_fit(train_x_encoded)
self._y_scaler.partial_fit(train_y_encoded)
train_x_scaled = self._x_scaler.transform(train_x_encoded)
train_y_scaled = self._y_scaler.transform(train_y_encoded)
# Refactor the label data set according to the forecast horizon without changing
# confs['labels'] (the model maintains the correspondence internally).
# Note: both the sliding stride and the sampling_rate are 1 here.
n_forecast = self._confs['n_forecast']
train_y_refactor = []
for i in range(n_forecast, train_y_scaled.shape[0]):
y = train_y_scaled[i - n_forecast]
for j in range(1, n_forecast):
y = np.append(y, train_y_scaled[i + j - n_forecast])
train_y_refactor.append(y)
train_y_refactor = np.array(train_y_refactor)
# Generate the sample set.
x_size = train_x_scaled.shape[0]
y_size = train_y_refactor.shape[0]
size = min(x_size, y_size)
x_samples, y_samples = self._generate_samples(train_x_scaled[:size],
train_y_refactor[:size], True)
return x_samples, y_samples
def _generate_samples(self, x_dataset, y_dataset, isFitting):
"""根据时间步数生成符合lstm输入形状的样本集。
"""
# 检查数据集长度。
x_shape = x_dataset.shape
y_shape = y_dataset.shape
if x_shape[0] != y_shape[0]:
raise ValueError(
f"LstmPredictor({self._id}): x and y data sizes don't match.")
# Check the feature properties of the data set.
n_features = len(self._confs['features'])
n_past = self._confs['n_past']
if x_shape[1] != n_features:
raise ValueError(
f"LstmPredictor({self._id}): data doesn't match the feature properties.")
if x_shape[0] < n_past:
raise ValueError(f"LstmPredictor({self._id}): too few feature data.")
# Check the label properties of the data set.
n_forecast = self._confs['n_forecast']
n_labels = len(self._confs['labels'])
if not isFitting and y_shape[1] != n_labels:
raise ValueError(
f"LstmPredictor({self._id}): data doesn't match the label properties.")
if isFitting and y_shape[1] != n_labels * n_forecast:
raise ValueError(
f"LstmPredictor({self._id}): data doesn't match the label properties.")
if isFitting and y_shape[0] < n_forecast:
raise ValueError(f"LstmPredictor({self._id}): too few label data.")
# Build the samples.
if x_shape[0] == n_past:
x_samples = x_dataset.reshape(-1, n_past, n_features)
y_samples = y_dataset.reshape(-1, n_labels)
return x_samples, y_samples
size = x_shape[0] - n_past
samples = TimeseriesGenerator(x_dataset,
y_dataset,
length=n_past,
batch_size=size)
x_samples, y_samples = samples[0][0], samples[0][1]
return x_samples, y_samples
def predict(self, pred_x: np.array) -> np.array:
# Check the input data type.
if not isinstance(pred_x, np.ndarray):
raise IOError("LstmPredictor.predict() requires `numpy.ndarray`")
# Build the forecast sample set.
x_samples = self._create_pred_samples(pred_x)
# Run the forecast.
pred_y = self._model.predict(x_samples) if self._model else None
# Extract the forecast results.
n_forecast = self._confs['n_forecast']
n_labels = len(self.labels)
pred_y_extract = pred_y[:, 0:n_labels]
if n_forecast > 1:
pred_y_extend = pred_y[-1, (-n_forecast + 1) * n_labels:]
pred_y_extract = np.append(pred_y_extract, pred_y_extend)
pred_y_extract = pred_y_extract.reshape(-1, n_labels)
# Restore the forecasts to their original scale.
pred_y_inverse = self._inverse_prediction(pred_y_extract)
return pred_y_inverse
def _create_pred_samples(self, pred_x):
"""生成预报样本集。
"""
# 编码数据集。
pred_x_df = self._parse_numpy_to_pandas(pred_x, self.features)
pred_x_encoded = self._x_encoder.transform(pred_x_df)
# 标准化数据集。
pred_x_scaled = self._x_scaler.transform(pred_x_encoded)
pred_y_scaled = np.ones([pred_x_scaled.shape[0], len(self.labels)])
# 生成样本集
x_samples, _ = self._generate_samples(pred_x_scaled, pred_y_scaled, False)
return x_samples
def _inverse_prediction(self, pred_y):
"""将模拟结果逆标准化恢复到实际值。
"""
# 逆标准化。
pred_y_1 = self._y_scaler.inverse_transform(pred_y)
if pred_y_1 is None:
raise RuntimeError(
f"LstmPredictor({self._id}): model not loaded or fitted.")
# Inverse encoding.
pred_y_df = pd.DataFrame(pred_y_1, columns=self.labels)
pred_y_2 = self._y_encoder.inverse_transform(pred_y_df).to_numpy()
if pred_y_2 is None:
raise RuntimeError(
f"LstmPredictor({self._id}): model not loaded or fitted.")
return pred_y_2
def evaluate(self, test_x: np.array, test_y: np.array) -> dict:
# Check the input argument types.
if not isinstance(test_x, np.ndarray) or not isinstance(test_y, np.ndarray):
raise IOError("LstmPredictor.evaluate() requires `numpy.ndarray`")
# Run the forecast.
pred_y = self.predict(test_x)
if pred_y is None:
raise RuntimeError(f"LstmPredictor({self._id}): prediction error.")
# Evaluate the forecasts.
scores = {}
n_past = self._confs['n_past']
n_forecast = self._confs['n_forecast']
for i in range(len(self.labels)):
label_id = self._confs['labels'][i]
test_y_i = test_y[n_past:, i] # drop the leading observations that have no forecast
if n_forecast < 2:
pred_y_i = pred_y[:, i]
else:
pred_y_i = pred_y[:-n_forecast + 1, i] # drop the look-ahead forecasts
mae = mean_absolute_error(test_y_i, pred_y_i)
mse = mean_squared_error(test_y_i, pred_y_i)
scores[label_id] = {'mae': mae, 'mse': mse}
return scores
def get_model_confs(self) -> dict:
"""获取模型配置。
注意,模型在不同阶段的配置属性可能不同。
Returns
----
返回模型配置。
"""
return self._confs
def save(self):
save_path = self._confs['save_path']
# Save the model.
self._model.save(self._get_model_file(save_path))
# Save the encoders.
x_id, y_id = self._get_encoder_ids()
self._x_encoder.save(*self._get_encoder_file(save_path, x_id))
self._y_encoder.save(*self._get_encoder_file(save_path, y_id))
# Save the scalers.
x_id, y_id = self._get_scaler_ids()
self._x_scaler.save(*self._get_scaler_file(save_path, x_id))
self._y_scaler.save(*self._get_scaler_file(save_path, y_id))
# Save the model configuration.
with open(self._get_confs_file(save_path), 'w', encoding='utf8') as fo:
json.dump(self._confs, fo)
def reset(self, **kwargs):
self._model = kwargs['model']
self._x_encoder = kwargs['x_encoder']
self._y_encoder = kwargs['y_encoder']
self._x_scaler = kwargs['x_scaler']
self._y_scaler = kwargs['y_scaler']
self._confs = kwargs['confs']
@staticmethod
def load(ID: str, load_path: str) -> LstmPredictor:
lstm = LstmPredictor(ID)
# Load the model.
model = models.load_model(lstm._get_model_file(load_path))
# Load the encoders.
x_id, y_id = lstm._get_encoder_ids()
x_encoder = OrdinalEncoder.load(x_id, *lstm._get_encoder_file(load_path, x_id))
y_encoder = OrdinalEncoder.load(y_id, *lstm._get_encoder_file(load_path, y_id))
# Load the scalers.
x_id, y_id = lstm._get_scaler_ids()
x_scaler = StdScaler.load(x_id, *lstm._get_scaler_file(load_path, x_id))
y_scaler = StdScaler.load(y_id, *lstm._get_scaler_file(load_path, y_id))
# Load the model configuration.
with open(lstm._get_confs_file(load_path), 'r', encoding='utf8') as fi:
confs = json.load(fi)
# Configure the model
lstm.reset(model=model,
x_encoder=x_encoder,
y_encoder=y_encoder,
x_scaler=x_scaler,
y_scaler=y_scaler,
confs=confs)
return lstm
def _get_model_file(self, parent_dir):
"""配置模型保存和加载文件。
"""
model_path = os.path.join(parent_dir, self._id)
if not os.path.exists(model_path):
os.makedirs(model_path)
model_file = os.path.join(model_path, f'GR-LSTM-{self._id}-model.h5')
return model_file
def _get_scaler_file(self, parent_dir, scaler_id):
"""配置定标器保存和加载文件。
"""
model_path = os.path.join(parent_dir, self._id)
if not os.path.exists(model_path):
os.makedirs(model_path)
scaler_file = os.path.join(model_path, f'GR-LSTM-{scaler_id}-scaler.pkl')
property_file = os.path.join(model_path,
f'GR-LSTM-{scaler_id}-scaler-property.json')
return scaler_file, property_file
def _get_encoder_file(self, parent_dir, encoder_id):
"""配置编码器保存和加载文件。
"""
model_path = os.path.join(parent_dir, self._id)
if not os.path.exists(model_path):
os.makedirs(model_path)
encoder_file = os.path.join(model_path, f'GR-LSTM-{encoder_id}-encoder.pkl')
property_file = os.path.join(model_path,
f'GR-LSTM-{encoder_id}-encoder-property.json')
return encoder_file, property_file
def _get_confs_file(self, parent_dir):
"""配置自定义配置保存和加载文件。
"""
confs_path = os.path.join(parent_dir, self._id)
if not os.path.exists(confs_path):
os.makedirs(confs_path)
confs_file = os.path.join(confs_path, f'GR-LSTM-{self._id}-confs.json')
return confs_file
def summary(self):
if self._model:
self._model.summary()
@property
def features(self):
return self._confs['features']
@property
def labels(self):
return self._confs['labels'] | AIFloodMaster | /AIFloodMaster-1.1.0-py3-none-any.whl/FloodMaster/forecasters/LstmPredictor.py | LstmPredictor.py |
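# --- Illustrative usage (not part of the original module) ---
# A minimal end-to-end sketch with synthetic data: compile, fit and evaluate a
# tiny LstmPredictor. All shapes, feature/label names and hyper-parameters below
# are arbitrary demo values, not recommended settings, and the sketch assumes the
# FloodMaster encoders/scalers and TensorFlow are installed and working.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.random((200, 2))                                   # two synthetic features
    y = x[:, :1] + 0.5 * x[:, 1:] + 0.01 * rng.random((200, 1))  # one synthetic label
    demo = LstmPredictor("demo-lstm")
    demo.compile({
        "features": ["f1", "f2"],
        "labels": ["q"],
        "n_past": 6,
        "n_forecast": 1,
        "lstm_units": 16,
        "lstm_depth": 2,
    })
    demo.fit(x[:150], y[:150], {"epochs": 2, "batch_size": 16, "verbose": 0})
    print(demo.evaluate(x[150:], y[150:]))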
import numpy as np
import matplotlib.pyplot as plt
import os
class Regression_single_target:
def __init__(self, lr=0.01, num_iters=1000):
"""
Constructor for the Regression_single_target class.
Parameters:
lr (float): learning rate for gradient descent (default=0.01).
num_iters (int): number of iterations for gradient descent (default=1000).
"""
self.lr = lr
self.num_iters = num_iters
self.weights = None
self.bias = None
self.loss_history = []
def fit(self, X, y):
"""
Fit linear regression model to the data.
Parameters:
X (numpy.ndarray): input data of shape (n_samples, n_features).
y (numpy.ndarray): target labels of shape (n_samples, 1).
Returns:
None
"""
n_samples, n_features = X.shape
# Initialize the weights and bias
self.weights = np.zeros((n_features, 1))
self.bias = 0
# Gradient descent
for i in range(self.num_iters):
y_predicted = np.dot(X, self.weights) + self.bias
dw = (1/n_samples) * np.dot(X.T, (y_predicted - y))
db = (1/n_samples) * np.sum(y_predicted - y)
self.weights -= self.lr * dw  # dw already has shape (n_features, 1); no transpose needed
self.bias -= self.lr * db
# Record the loss at each iteration
self.loss_history.append(self.mean_squared_error(y, y_predicted))
def predict(self, X):
"""
Predict target labels for new data.
Parameters:
X (numpy.ndarray): new input data of shape (n_samples, n_features).
Returns:
numpy.ndarray: predicted target labels of shape (n_samples, 1).
"""
y_predicted = np.dot(X, self.weights) + self.bias
return y_predicted
def mean_squared_error(self, y_true, y_pred):
"""
Compute the mean squared error between true and predicted target labels.
Parameters:
y_true (numpy.ndarray): true target labels of shape (n_samples, 1).
y_pred (numpy.ndarray): predicted target labels of shape (n_samples, 1).
Returns:
float: mean squared error between true and predicted target labels.
"""
return np.mean((y_true - y_pred) ** 2)
def plot_loss_history(self):
"""
Plot the loss history over the course of training.
Parameters:
None
Returns:
None
"""
plt.plot(range(self.num_iters), self.loss_history)
plt.title('Loss vs. Iterations')
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.savefig('loss_history.png', dpi=300)
plt.clf()
def plot_data(self, X, y):
"""
Plot the input data and target labels.
Parameters:
X (numpy.ndarray): input data of shape (n_samples, n_features).
y (numpy.ndarray): target labels of shape (n_samples, 1).
Returns:
None
"""
plt.scatter(X, y)
plt.title('Data vs labels')
plt.xlabel('Data [X]')
plt.ylabel('Labels [y]')
plt.savefig('dataplot.png', dpi=300)
plt.clf()
def plot_regression(self, X, y, X_new, y_pred):
"""
Plot the linear regression line on top of the input data and target labels.
Parameters:
X (numpy.ndarray): input data of shape (n_samples, n_features).
y (numpy.ndarray): target labels of shape (n_samples, 1).
X_new (numpy.ndarray): new input data of shape (n_samples_new, n_features).
y_pred (numpy.ndarray): predicted target labels of shape (n_samples_new, 1).
Returns:
None
"""
plt.scatter(X, y)
plt.plot(X_new, y_pred, 'r')
plt.title('Linear Regression')
plt.xlabel('X')
plt.ylabel('y')
plt.savefig('regression.png', dpi=300)
plt.clf()
def to_latex(self, filename):
"""
Generates a LaTeX document with the training parameters, plots of the data and label,
loss vs iterations, and the linear regression line.
Parameters:
filename (str): The name of the LaTeX file to be generated.
"""
with open(filename, 'w') as f:
f.write('\\documentclass{article}\n')
f.write('\\usepackage{graphicx}\n')
f.write('\\begin{document}\n\n')
f.write('Training parameters:\n')
f.write('\\begin{itemize}\n')
f.write('\\item Learning rate: {}\n'.format(self.lr))
f.write('\\item Number of iterations: {}\n'.format(self.num_iters))
f.write('\\end{itemize}\n\n')
f.write("\\")
f.write('Data vs label:\n')
f.write('\\begin{figure}[h]\n')
f.write('\\centering\n')
f.write('\\includegraphics[scale = 0.5]{dataplot.png}\n')
f.write('\\end{figure}\n\n')
f.write("\\")
f.write("\newpage")
f.write('Loss vs. iterations:\n')
f.write('\\begin{figure}[h]\n')
f.write('\\centering\n')
f.write('\\includegraphics[scale = 0.5]{loss_history.png}\n')
f.write('\\end{figure}\n\n')
f.write("\\")
f.write('Linear regression:\n')
f.write('\\begin{figure}[h]\n')
f.write('\\centering\n')
f.write('\\includegraphics[scale = 0.5]{regression.png}\n')
f.write('\\end{figure}\n\n')
f.write('\\end{document}\n')
# Compile the LaTeX file to PDF
os.system('pdflatex {}'.format(filename)) | AIFrameWork | /AIFrameWork-0.0.5-py3-none-any.whl/regression_single_target/regression_single_target.py | regression_single_target.py |
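# --- Illustrative usage (not part of the original module) ---
# A self-contained sketch: fit the model on synthetic 1-D data, report the MSE
# and write the plots. The output file names are the ones hard-coded above
# (loss_history.png, dataplot.png, regression.png); to_latex() additionally
# requires a local pdflatex installation.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    X = rng.uniform(0, 10, size=(100, 1))
    y = 3.0 * X + 2.0 + rng.normal(0, 1, size=(100, 1))   # noisy line y = 3x + 2
    model = Regression_single_target(lr=0.01, num_iters=2000)
    model.fit(X, y)
    y_pred = model.predict(X)
    print("MSE:", model.mean_squared_error(y, y_pred))
    model.plot_loss_history()
    model.plot_data(X, y)
    model.plot_regression(X, y, X, y_pred)
    # model.to_latex("report.tex")  # uncomment if pdflatex is available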
from modules.web.OpenAI.OpenAI import createCompletion
import subprocess, os, re
def runCode(codePath):
try:
output = subprocess.check_output(['python3', codePath], stderr=subprocess.STDOUT).decode('utf-8');
return (output, False);
except subprocess.CalledProcessError as e:
err = e.output.decode('utf-8');
return (err, True);
def installModule(module):
try:
subprocess.run(['python3', '-m', 'pip', 'install', module], stderr=subprocess.STDOUT);
return True;
except subprocess.CalledProcessError as e:
return False;
def aiFunction(function, description, installPackages):
if (not os.path.exists('functions')):
os.mkdir('functions');
functionPath = f'functions/{function}.py';
if (not os.path.exists(functionPath)):
code = createCompletion([
{
'content': 'You are an AI code generator. Generate ONLY the python code for the given function description, nothing else. The code should include just the function body, without the def statement, and should print just the output, without any other text. The code should take no user input, and should only have a single print statement.',
'role': 'system'
},
{
'content': 'Here is an example:\nFunction: time\nDescription: Returns the current time.\n\nfrom datetime import datetime\nnow = datetime.now();\ntime = now.strftime("%I:%M %p");\nprint(time);',
'role': 'system'
},
{
'content': f'Function: {function}\nDescription: {description}',
'role': 'user'
}
]);
with open(functionPath, 'w') as functionFile:
functionFile.write(code);
# Run the code from the path. If there are errors, print them. Return the output
(output, err) = runCode(functionPath);
while (err):
if ('ModuleNotFoundError' in output and installPackages):
# Install the missing module
module = re.search(r"ModuleNotFoundError: No module named '(.*)'", output).group(1);
if (module == ''):
break;
print(f'Installing module: {module}');
installed = installModule(module);
if (not installed):
return f'Error: Could not install module: {module}';
(output, err) = runCode(functionPath);
else:
return '';
return output;
print(aiFunction('time', 'Returns the current time.')); | AIFunction-nekumelon | /AIFunction-nekumelon-0.0.2.tar.gz/AIFunction-nekumelon-0.0.2/src/main.py | main.py |
===========
AIGO
===========
AIGO is a python library for the Analysis and
the Inter-comparison of Gene Ontology functional annotations.
see (http://code.google.com/p/aigo).
Created by Michael Defoin-Platel on 21/02/2010.
Copyright (c) 2010. All rights reserved.
Typical usage could look like this::
#!/usr/bin/env python
from AIGO import logger
from AIGO.ReferenceSet import RefSet
from AIGO.FunctionalAnnotation import FuncAnnot
from AIGO.go.OBO import readGOoboXML
from AIGO.Analyse import AnalyseFA
from AIGO.Report import ReportFA
from AIGO.utils.Execute import batchExecute
refSet = RefSet(organism="platypus", fileName="platypus.refSet", refType="Text")
G = readGOoboXML("go_daily-termdb.obo-xml")
FA = FuncAnnot("platypusProject", refSet, G, organism="platypus")
FA.read("platypus.gaf", "GAF")
analyseFA = AnalyseFA()
analyseFA.largestSet([FA])
logger.info("Largest sets of annotations:")
logger.info("\t%d for %s" % (FA['largestSet']['All_aspects_of_GO'], FA.name))
batchList=["coverage", "richness", "numberAnnot", "redundancy", "specificity", "informationContent"]
batchExecute(batchList, analyseFA, [FA])
reportFA = ReportFA(outDir=None, name="platypusProject", organism="platypus")
reportFA.printStatistics([FA] ,batchList)
Tests
=====
Run testAIGO.py in the tests directory
Requirements
==============
Running AIGO on windows
-------------------------
* The Python 2.6.5 interpreter for Windows: http://www.python.org/download.
* GTK+ runtime (the bundle is recommended), PyGTK, PyCairo and PyGObject: http://www.pygtk.org/downloads.html
* Biopython: http://biopython.org/wiki/Download
* NumPy: http://sourceforge.net/projects/numpy/files/NumPy
* matplotlib: http://sourceforge.net/projects/matplotlib/files/matplotlib/matplotlib-1.0
* xlwt http://pypi.python.org/pypi/xlwt
Optional :
* wxPython: http://www.wxpython.org/download.php#binaries
* psyco: http://sourceforge.net/projects/psyco/files
* RPy: http://sourceforge.net/projects/rpy/files
Contributors
============
* Michael Defoin-Platel
* Matthew Hindle
| AIGO | /AIGO-0.1.0.tar.gz/AIGO-0.1.0/README.txt | README.txt |
# AIGrammar
## About
AIGrammar is an all-in-one, easy-to-use package for model diagnostics and vulnerability checks. With a single line of code it checks model metrics and prediction generalizability, feature contribution, and model vulnerability against adversarial attacks.
**Data**
- Multicollinearity
- Data drift
**Model**
- Metric metric comparison
- roc_auc vs average precision
- Optimal threshold vs 50% threshold
**Feature importance**
- Too high importance
- 0 impact
- Negative influence (FLOFO)
- Causes of overfitting
**Adversarial Attack**
- Identifies model vulnerability from the minimal change in a single feature needed to flip the predicted outcome.
**Usage**
Python 3.7+ required.
Installation: ``pip install AIGrammar``
Example:
```python
from AIGrammar import AIGrammar

aig = AIGrammar(train, test, model, target_name)
aig.measure_all(X0_shap_values, X1_shap_values)

print(aig.diagnosis)
print(aig.warnings)
```
| AIGrammar | /AIGrammar-0.0.6.tar.gz/AIGrammar-0.0.6/README.md | README.md |
__all__ = ['python']
class Python:
name = ''
pip = None
def __init__(self, telecontrol):
self.tc = telecontrol
self._ensure_python(self.tc.yum)
self.pip = _Pip(self)
def start(self, file):
return self.tc.start(self._join(file))
def _join(self, *s):
return ' '.join([self.name] + list(s))
def _is_ok(self):
names = ['python3', 'python3.4', 'python3.5', 'python3.6']
for name in names:
self.tc.sendline(' '.join(['type', name]), stdout=False)
self.tc.prompt()
if not self.tc.show():
self.name = name
return True
else:
return False
def _ensure_python(self, yum):
if self._is_ok():
return
yum.install('epel-release')
_, msg = yum.install('python34')
if self._is_ok():
return
raise Exception('\n'.join([
'Cannot find python.',
'Message from yum:',
msg]))
class _Pip:
name = ''
def __init__(self, python):
self.python = python
self.tc = python.tc
def __bool__(self):
return bool(self.name)
def list(self):
self._ensure_pip()
self.tc.sendline(' '.join([self.name, 'list',
'--format=legacy']), stderr=False)
self.tc.prompt()
foo = self.tc.show()
foo = foo.split('\n')
foo = filter(lambda x:x, foo)
foo = [i.split(' ')[0].strip() for i in foo]
self.tc.reset()
return foo
def install(self, packs):
if not isinstance(packs, list):
packs = [packs]
self._ensure_pip()
for p in packs:
self.tc.sendline(' '.join([self.name, 'install', p, '-U']),
stdout=False, stderr=False)
self.tc.prompt()
foo = self.list()
for p in packs:
if p not in foo:
self.tc.reset()
return False, p
else:
return True, None
def _is_ok(self):
name = ' '.join([self.python.name, '-m', 'pip'])
self.tc.sendline(name, stdout=False)
self.tc.prompt()
if not self.tc.show():
self.name = name
return True
name = 'pip3'
self.tc.sendline(name, stdout=False)
self.tc.prompt()
if not self.tc.show():
self.name = name
return True
return False
def _ensure_pip(self):
if self or self._is_ok():
return
self.tc.sendline(' '.join([
self.python.name, '-m', 'ensurepip']), stdout=False)
self.tc.prompt()
msg = self.tc.show()
if not msg:
self.tc.sendline(' '.join([
self.python.name, '-m', 'pip',
'install', '--upgrade', 'pip']), stdout=False)
self.tc.prompt()
msg = self.tc.show()
if not self._is_ok():
raise Exception('Pip Error:\n' + msg) | AIJIcossh | /AIJIcossh-0.0.3.tar.gz/AIJIcossh-0.0.3/cossh/python.py | python.py |
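# --- Illustrative usage (not part of the original module) ---
# How the Python/_Pip helpers are meant to be driven from a Telecontrol session
# (see cossh.telecontrol). The host, port, credentials and remote script path
# below are placeholders.
#
#   from cossh.telecontrol import Telecontrol
#   tc = Telecontrol('root', ip='198.51.100.7', port='22', passwd='***')
#   py = tc.python                          # python3 located/installed on the remote host
#   ok, failed = py.pip.install(['requests', 'lxml'])
#   if ok:
#       pid = py.start('/opt/app/main.py')  # run detached via nohup, returns the remote PID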
from pexpect import TIMEOUT
import os.path as op
import re
import time
dirpath = op.dirname(op.abspath(__file__))
class Yum:
def __init__(self, telecontrol):
self.tc = telecontrol
boo, msg = self._repair()
if boo is False:
raise Exception('Yum is not available.')
# Internal, blocking
def _is_ok(self):
'''check yum is available or not'''
# @WARNING: This function will never timeout
self.tc.sendline('yum list yum', stdout=False)
possibles = [self.tc.PROMPT, TIMEOUT]
while True:
index = self.tc.expect(possibles, timeout=5)
if index == 0:
if not self.tc.readlines():
return True, ''
return False, self.tc.readlines()
elif index == 1:
pass
# Internal
def _repair(self):
'''Try to fix problems on yum'''
boo, msg = self._is_ok()
res = 'Unknown exception when repairing yum.'
if boo is True:
return True, ''
if 'Error: Cannot retrieve metalink for repository: epel.' in msg:
source = op.join(dirpath, 'epel.repo')
filepath = '/etc/yum.repos.d'
foo, bar = self.tc.upload(source, filepath, 'epel2.repo')
if foo is False:
res = 'Upload repo failed'
elif __debug__: print('Upload repo successfully.')
else:
raise Exception('Unknow yum problem\n' + msg + '\n\n')
boo, msg = self._is_ok()
if boo is not True:
return False, res
else:
return True, ''
def has(self, name):
'''Check whether an application is installed or not'''
self.tc.sendline('yum list %s' % name, stdout=False)
self.tc.prompt(5)
if not self.tc.readlines():
return True
name = 'type ' + name
self.tc.sendline(name, stdout=False)
self.tc.prompt(5)
foo = self.tc.readlines()
if foo:
return False
return True
def install(self, name):
'''Install packages by yum.'''
if self.has(name) is True:
return True, 'exist'
self.tc.sendline('yum install %s -y' % name, stdout=False)
while True:
possibles = [TIMEOUT, self.tc.PROMPT]
index = self.tc.expect(possibles)
if index == 0:
pass
else:
if not self.tc.readlines():
return True, 'complete'
else:
info = '\n'.join(['fail when installing', '='*10, self.tc.readlines(), '='*10, ''])
return False, info | AIJIcossh | /AIJIcossh-0.0.3.tar.gz/AIJIcossh-0.0.3/cossh/yum.py | yum.py |
import re
import sys
import os
import time
from pexpect.pxssh import pxssh
import pexpect as pe
from pexpect import TIMEOUT
from devtools.format import tostr
from .yum import Yum
from .python import Python
INTERNAL = 0.5
# A wrap for the pxssh Class (which is a wrap of spawn Class).
# Make controlling remote more easily
class Telecontrol(pxssh):
target_addr = ('', 0)
def __init__(
self,
user,
ip='122.144.139.60',
port='12045',
passwd='yylc#8888',
timeout=3,
echo=True):
super(Telecontrol, self).__init__(timeout=timeout)
self.login(ip, user, passwd, port=port)
if echo or __debug__ :
print('logged in. %s@%s:%s' % (user, ip, port))
self.target_addr = (ip, str(port))
self.user = user
self.passwd = passwd
self.yum = Yum(self)
self.python = Python(self)
if __debug__ : print('checked in advance.')
if __debug__ : print('Telecontrol created.')
def start(self, cmd):
'''run a cmd with no hup or stdin/out/err. Returns the pid of child process'''
self.sendline(cmd, nohup=True, stderr=False)
self.prompt()
res = re.search('\[\d*\] (\d*)', self.show()).groups()[0]
return int(res)
# Override, add sleep to avoid some weird bugs
def prompt(self, timeout=-1, sleep=0.1):
time.sleep(sleep)
res = super(Telecontrol, self).prompt(timeout)
time.sleep(sleep)
return res
# Override, add option stdout=False
def sendline(self, s='', stdout=True, stderr=True, nohup=False):
if stdout is False:
s = s + ' 1>/dev/null'
if stderr is False:
s = s + ' 2>/dev/null'
if nohup is True:
s = 'nohup ' + s + ' &'
if nohup:
if stderr:
# unfixed !!!!
raise NotImplementedError('Weird line feed.')
self.prompt()
self.expect(os.linesep, timeout=0.1)
# self.expect(dd[os.linesep], timeout=0.1)
res = res + self.sendline('clear')
else:
res = super(Telecontrol, self).sendline(s)
return res
else:
res = super(Telecontrol, self).sendline(s)
return res
# return True: is a directory
# return False: is not a directory
# return None: no such file or directory
def isdir(self, filepath):
self.sendline(filepath + '/')
self.prompt()
text = self.readlines()
if re.search('(?i)is a directory', text):
return True
elif re.search('(?i)not a directory', text):
return False
elif re.search('(?i)no such file or directory', text):
return None
else:
raise Exception('Unpredicted output:\n {0} \n\n'.format(text))
# remove file or directory sliently by force
def remove(self, filepath):
foo = self.isdir(filepath)
if foo is True:
self.sendline('rm -rf %s' % filepath)
self.prompt()
elif foo is False:
self.sendline('rm -f %s' % filepath)
self.prompt()
elif foo is None:
if __debug__: print('Remove {0}: file or dir not found'.format(filepath))
return
if __debug__ : print('Remove {0}'.format(filepath), self.readlines(), sep='\n')
def mkdir(self, dirpath):
if not dirpath.startswith('/'):
raise NotImplementedError('mkdir for relative path not supported yet')
self.sendline('mkdir %s' % dirpath, stdout=False)
self.prompt()
foo = self.readlines()
if foo:
return False, foo
else:
return True, ''
def show(self):
foo = self.before.decode()
foo = re.sub('^.*\r\n', '', foo)
return foo
def readlines(self):
foo = self.show()
foo = foo.split('\n')
foo = filter(lambda x: x.strip() and not re.match('\[\d*\]\+', x), foo)
return list(foo)
def senddir(self, source_dir, target_dir, timeout=10):
if not os.path.exists(source_dir):
return False, 'file or directory not found'
if not os.path.isdir(source_dir):
raise Exception('Not a dir.')
target_dir = target_dir + '/'
self.mkdir(target_dir)
target_path = '{0}@{1}:{2}'.format(
self.user,
self.target_addr[0],
target_dir)
cmd = 'scp -P {0} -r {1}/* {2}'.format(
self.target_addr[1],
source_dir,
target_path)
child = pe.spawn(cmd, timeout=3)
time = 0
possibles = [
'(?i)password: ', # 0
'(?i)yes/no', # 1
'lost connection', # 2
pe.EOF, # 3
pe.TIMEOUT, # 4
'ssh:', # 5
'100%', # 6
'(?i)no such', # 7
]
while True:
index = child.expect(possibles)
if __debug__ and index != 6: print('Uploading ... code:', index, str(possibles[index]))
if index == 0:
child.sendline(self.passwd)
elif index == 1:
child.sendline('yes')
elif index == 2:
return False, 'lost connection'
elif index == 3:
return True, child.before
elif index == 4:
time += child.timeout
if time > timeout:
return False, 'upload timeout'
elif index == 5:
return False, 'ssh failed'
elif index == 6:
pass
elif index == 7:
print('Upload failed, cmd:', cmd)
# This function need to be improved
def upload(self, source_file, target_dir, target_name='', opt='', timeout=10):
if not os.path.exists(source_file):
return False, 'file or directory not found'
if os.path.isdir(source_file):
opt = '-r'
elif os.path.isfile(source_file):
opt = ''
else:
raise Exception('Not a dir nor a file')
if source_file.strip().endswith('/'):
raise NotImplementedError('"scp source/dir/ target/dir" not implemented.')
target_dir = target_dir + '/'
if not target_name:
target_name = source_file.split('/')[-1]
target_path = os.path.join(target_dir, target_name)
self.mkdir(target_dir)
if target_dir.count('/') <= 1:
raise Exception('This is a danger path')
self.remove(target_path)
target_path = '{0}@{1}:{2}'.format(
self.user,
self.target_addr[0],
target_path)
cmd = 'scp -P {0} {1} {2} {3}'.format(
self.target_addr[1],
opt,
source_file,
target_path)
if __debug__: print('Upload cmd:', cmd)
child = pe.spawn(cmd, timeout=3)
time = 0
possibles = [
'(?i)password: ', # 0
'(?i)yes/no', # 1
'lost connection', # 2
pe.EOF, # 3
pe.TIMEOUT, # 4
'ssh:', # 5
'100%'] # 6
while True:
index = child.expect(possibles)
if __debug__ and index != 6: print('Uploading ... code:', index, str(possibles[index]))
if index == 0:
child.sendline(self.passwd)
elif index == 1:
child.sendline('yes')
elif index == 2:
return False, 'lost connection'
elif index == 3:
return True, child.before
elif index == 4:
time += child.timeout
if time > timeout:
return False, 'upload timeout'
elif index == 5:
return False, 'ssh failed'
elif index == 6:
pass
def reset(self):
self.sendcontrol('c')
self.prompt(0.1)
self.sendline('clear')
self.prompt(0.1)
time.sleep(0.5) | AIJIcossh | /AIJIcossh-0.0.3.tar.gz/AIJIcossh-0.0.3/cossh/telecontrol.py | telecontrol.py |
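# --- Illustrative usage (not part of the original module) ---
# Sketch of a typical file-deployment session. The address, credentials and
# paths are placeholders; do not rely on the constructor defaults above.
#
#   tc = Telecontrol('deploy', ip='198.51.100.7', port='22', passwd='***')
#   ok, msg = tc.upload('dist/app.tar.gz', '/opt/app')    # single file via scp
#   ok, msg = tc.senddir('conf', '/opt/app/conf')         # whole directory
#   pid = tc.start('python3 /opt/app/run.py')             # nohup + background, returns PID
#   tc.logout()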
import os.path
from aijobs import (
TopCVSeleniumScraper,
VietnamWorksSeleniumScraper,
IndeedSeleniumScraper,
Helper,
)
from argparse import ArgumentParser, Namespace
import json
arg = ArgumentParser("AIJobs -- Tool to collect VN AI Jobs from different sources")
arg.add_argument(
"--headless", "-H", action="store_true", help="Whether if use headless mode"
)
arg.add_argument(
"--browser",
"-b",
type=str,
default="./scripts/chrome/GoogleChromePortable64/GoogleChromePortable64/GoogleChromePortable.exe",
help="Path to the Chrome browser for testing",
)
arg.add_argument(
"--driver",
"-d",
type=str,
default="./scripts/chrome/undetected_chromedriver.exe",
help="Path to the UnDetected Chrome Driver for testing",
)
arg.add_argument(
"--scrapper", "-s", type=str, default="TopCV", help="The name of the scrapper"
)
arg.add_argument(
"--query",
"-q",
nargs="+",
type=str,
help="The query to search for jobs",
required=True,
)
arg.add_argument(
"--location",
"-l",
type=str,
default="Hanoi, Vietnam",
help="The location of the jobs",
)
arg.add_argument(
"--output",
"-o",
type=str,
default="./output",
help="The directory to save the output JSON files",
)
params = arg.parse_args()
def cmd() -> None:
browser = os.path.abspath(params.browser)
driver = os.path.abspath(params.driver)
if params.scrapper.lower() == "vietnamworks":
scrapper = VietnamWorksSeleniumScraper(
browser_path=browser, driver_path=driver, headless=params.headless
)
start_url = "https://vietnamworks.com"
elif params.scrapper.lower() == "indeed":
scrapper = IndeedSeleniumScraper(
browser_path=browser, driver_path=driver, headless=params.headless
)
start_url = "https://vn.indeed.com"
elif params.scrapper.lower() == "topcv":
scrapper = TopCVSeleniumScraper(
browser_path=browser, driver_path=driver, headless=params.headless
)
start_url = "https://www.topcv.vn"
else:
raise NotImplementedError(
"This scrapper is not supported. Please consult to the manager."
)
queries = params.query
location = params.location
if not os.path.exists(params.output):
os.makedirs(params.output)
for query in queries:
results = scrapper.scrape(
start_url=start_url, query=query, location=location, loop_next_page=True
)
json.dump(
fp=open(
f"{params.output}/{params.scrapper}_{query.replace(' ', '+')}_{Helper.get_current_time()}.json",
"w",
encoding="utf-8",
),
obj=results,
)
scrapper.close() | AIJobs-Batch | /AIJobs_Batch-1.0.0a1-py3.8-py3-any.whl/aijobs/cli/cmd_batch.py | cmd_batch.py |
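# --- Illustrative invocation (not part of the original module) ---
# `cmd()` is meant to be wired up as a console-script entry point; the exact
# command name depends on the package metadata, so the call below uses a
# hypothetical `aijobs-batch` alias. Each query writes one timestamped JSON
# file into the --output directory.
#
#   aijobs-batch --scrapper TopCV \
#       --query "ai engineer" "machine learning" \
#       --location "Hanoi, Vietnam" \
#       --headless --output ./output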
from typing import Any
import undetected_chromedriver as uc
class CommonScraper(object):
"""
A common scraper
"""
def __init__(self):
pass
def __parse__(self, url: str | None) -> Any:
"""
The parse function
:param url: the query URL
:return: the object of scraped jobs in this individual page
"""
pass
def scrape(
self,
start_url: str,
query: str,
location: str | None,
loop_next_page: bool = True,
) -> Any:
"""
Scrape function
:param start_url: the start url for e.g., https://vn.indeed.com/
:param query: the job keywords, for e.g., 'ai engineer'
:param location: the location you want to work, for e.g., 'hanoi'
:param loop_next_page: Indeed or job sharing pages has pagination, so whether if you want to the scraper to
loop to the next page
:return: the object of scraped jobs.
"""
pass
def __str__(self):
return "A common scraper"
class CommonSeleniumScraper(CommonScraper):
_driver: uc.Chrome | None
_options: uc.ChromeOptions | None
_driver_path: str | None = None
_browser_path: str | None = None
def __init__(
self,
binary: str | None = None,
headless: bool = True,
driver_path: str | None = None,
browser_path: str | None = None,
) -> None:
super().__init__()
options = uc.ChromeOptions()
if binary is not None:
options.binary_location = binary
# options.headless = True
if headless:
options.add_argument("--headless=new")
self._options = options
self._driver_path = driver_path
self._browser_path = browser_path
self._driver = uc.Chrome(
use_subprocess=True,
user_multi_procs=False,
options=self._options,
driver_executable_path=self._driver_path,
browser_executable_path=self._browser_path,
)
def __str__(self):
return "Common Selenium Scraper"
def close(self) -> None:
"""
Close the current windows after use
:return: none
"""
if self._driver and isinstance(self._driver, uc.Chrome):
self._driver.close() | AIJobs-Batch | /AIJobs_Batch-1.0.0a1-py3.8-py3-any.whl/aijobs/data/common_scraper.py | common_scraper.py |
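# --- Illustrative example (not part of the original module) ---
# A minimal subclass sketch showing how a concrete scraper builds on
# CommonSeleniumScraper: it simply opens the start URL and returns the page
# title. The class name is an invented placeholder; real scrapers (see the
# VietnamWorks/TopCV modules) add site-specific parsing on top of this.
class _TitleOnlyScraper(CommonSeleniumScraper):
    def scrape(self, start_url: str, query: str, location: str | None,
               loop_next_page: bool = True) -> Any:
        # no pagination or parsing: just fetch the page and report its title
        self._driver.get(start_url)
        return {"query": query, "title": self._driver.title}

    def __str__(self):
        return "Title-only demo scraper"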
import json
import time
from typing import Any, Callable, Tuple
from bs4 import BeautifulSoup
from selenium.common import StaleElementReferenceException, NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import (
presence_of_element_located,
AnyDriver,
visibility_of_element_located,
)
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from tqdm import tqdm
from aijobs.data.common_scraper import CommonSeleniumScraper
__all__ = ["VietnamWorksSeleniumScraper"]
from aijobs.utils.helpers import Helper
SEARCH_BAR_INPUT_FIELD_CLASS: str = "SearchBar-module_inputField"
SEARCH_BAR_INPUT_FIELD_PLACEHOLDER: str = "Tìm kiếm việc làm, công ty, kỹ năng"
SEARCH_BAR_BUTTON_CLASS: str = "SearchBar-module_actionGroup"
BLOCK_LIST_CLASS: str = "block-job-list"
NO_OF_JOBS_CLASS: str = "no-of-jobs"
ITEMS_PER_PAGE: int = 50
MAX_PAGES_TO_SCRAPE: int = 10
REMOVE_ICON_CLASS_NAME: str = "remove-icon"
REMOVE_ICON_ID_NAME: str = "icons/icon-forms/icon-remove-gray"
SORT_U_BUTTON_CLASS_NAME: str = "sort-item-wrapped"
SORT_U_BUTTON_TEXT: str = "Ngày đăng (mới nhất)"
def presence_of_element_located_and_text_non_empty(
locator: Tuple[str, str]
) -> Callable[[AnyDriver], bool]:
"""An expectation for checking that an element is present on the DOM of a
page. This does not necessarily mean that the element is visible.
And the text must be non-empty.
:param locator: used to find the element
:return: whether the element is found and the text is not empty
"""
def _predicate(driver):
try:
element = driver.find_element(*locator)
return element.text is not None and element.text.strip() != ""
except StaleElementReferenceException:
return False
return _predicate
class VietnamWorksSeleniumScraper(CommonSeleniumScraper):
_start_url: str = "https://vietnamworks.com"
def __init__(
self,
binary: str | None = None,
headless: bool = True,
driver_path: str | None = None,
browser_path: str | None = None,
) -> None:
super().__init__(binary, headless, driver_path, browser_path)
def _click_sort_button_(self):
"""
Click Sort U button
:return:
"""
try:
sort_U = self._driver.find_element(
By.XPATH,
f"//div[contains(@class, '{SORT_U_BUTTON_CLASS_NAME}')]/div[contains(@text, '{SORT_U_BUTTON_TEXT}')]",
)
sort_U.click()
time.sleep(2)
except Exception as e:
print(e)
def _click_delete_icon_(self):
"""
Click a delete icon
:return: None
"""
try:
delete_button = self._driver.find_element(By.ID, REMOVE_ICON_ID_NAME)
delete_button.click()
except Exception as e:
print(e)
def __parse__(self, query: str, *args, **kwargs) -> Any:
try:
self._driver.get(kwargs.get("url", None))
element = WebDriverWait(self._driver, 60).until(
presence_of_element_located(
(
By.XPATH,
f"//div[starts-with(@class, '{SEARCH_BAR_INPUT_FIELD_CLASS}')]",
)
)
)
element = self._driver.find_element(
By.XPATH,
f"//input[@placeholder='{SEARCH_BAR_INPUT_FIELD_PLACEHOLDER}']",
)
element.click()
self._click_delete_icon_()
element.clear()
time.sleep(1)
element.send_keys(query)
element = self._driver.find_element(
By.XPATH,
f"//div[starts-with(@class, '{SEARCH_BAR_BUTTON_CLASS}')]/button[contains(@class, 'clickable')]",
)
element.click()
except Exception as e:
print(e)
# self._driver.quit()
return None
try:
WebDriverWait(self._driver, 60).until(
visibility_of_element_located((By.XPATH, f"//strong[@title='{query}']"))
)
# self._click_sort_button_()
# Scroll down to get all items in this page
for i in range(10):
self._driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight);"
)
element = self._driver.find_element(
By.XPATH, f"//div[@class='{BLOCK_LIST_CLASS}']"
)
elements = element.find_elements(
By.XPATH, f"//div[contains(@class, 'view_job_item')]/div"
)
jobs = []
for el in elements:
html = el.get_attribute("innerHTML")
company = None
company_url = None
job_url = None
job_name = None
bs = BeautifulSoup(html, features="lxml")
links = bs.find_all("a", recursive=True)
for link in links:
href = link.get("href")
print(f"HREF: {href}")
if href is None or "http" in href:
continue
if "/nha-tuyen-dung/" in href:
company = link.text
company_url = self._start_url + href
else:
job_url = self._start_url + href
job_name = link.text
if (
company is None
and company_url is None
and job_url is None
and job_name is None
):
continue
jobs.append(
{
"title": job_name,
"url": job_url,
"company": company,
"company_url": company_url,
}
)
# self._driver.quit()
return jobs
except Exception as e:
print(e)
# self._driver.quit()
return None
def _parse_job_page_(self, url: str) -> Any:
"""
Parse a single job page
:param url: the target job page
:return: a tuple of benefits, description, requirements, locations, tags, and side information about the job
"""
try:
self._driver.get(url)
WebDriverWait(self._driver, 6).until(
visibility_of_element_located(
(By.XPATH, "//div[contains(@class, 'job-description')]")
)
)
element = self._driver.find_element(By.CLASS_NAME, "what-we-offer")
benefits = element.text
element = self._driver.find_element(By.CLASS_NAME, "job-description")
description = element.text
element = self._driver.find_element(By.CLASS_NAME, "job-requirements")
requirements = element.text
element = self._driver.find_element(By.CLASS_NAME, "job-locations")
locations = element.text
element = self._driver.find_element(By.CLASS_NAME, "job-tags")
tags = element.text
element = self._driver.find_element(By.CLASS_NAME, "tab-sidebar")
side_info = element.text
return benefits, description, requirements, locations, tags, side_info
except Exception as e:
print(e)
return None, None, None, None, None, None
def scrape(
self,
start_url: str,
query: str,
location: str | None,
loop_next_page: bool = True,
) -> Any:
jobs = self.__parse__(query, url=start_url)
if not loop_next_page:
return jobs
if jobs is not None:
count = 2
updated_url = self._driver.current_url
while count < MAX_PAGES_TO_SCRAPE:
try:
self._driver.find_element(
By.XPATH, "//ul[contains(@class, 'pagination')]"
)
except NoSuchElementException as e:
print(e)
break
tmp = self.__parse__(query, url=f"{updated_url}?page={count}")
count += 1
if tmp is None:
break
jobs.extend(tmp)
job_dict = {}
for job in jobs:
if job["url"] not in job_dict:
job_dict[job["url"]] = job
jobs = list(job_dict.values())
print(f"JOBS: {len(jobs)}")
for k, job in tqdm(enumerate(jobs)):
job_url = job["url"]
params = Helper.get_url_params(job_url)
job["placement"] = (
params["placement"] if "placement" in params else None
)
(
benefits,
description,
requirements,
locations,
tags,
side_info,
) = self._parse_job_page_(job_url)
job["extra"] = {
"benefits": benefits,
"description": description,
"requirements": requirements,
"locations": locations,
"tags": tags,
"side_info": side_info,
}
jobs[k] = job
# self._driver.close()
return {
"total": len(jobs) if jobs is not None else 0,
"query": query,
"location": None,
"dataList": jobs,
}
def __str__(self):
return "VietnamWorks Selenium Scraper" | AIJobs-Batch | /AIJobs_Batch-1.0.0a1-py3.8-py3-any.whl/aijobs/data/vietnamworks/selenium_scrapers.py | selenium_scrapers.py |
import json
import time
from typing import Any
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import (
presence_of_element_located,
)
from selenium.webdriver.support.wait import WebDriverWait
from aijobs.data.common_scraper import CommonSeleniumScraper
from aijobs.utils.helpers import Helper
__all__ = ["TopCVSeleniumScraper"]
INPUT_PLACEHOLDER_TEXT: str = "Vị trí ứng tuyển"
FIND_JOBS_BTN_CLASS_NAME: str = "btn-search-job"
TOTAL_SEARCH_JOB_CLASS_NAME: str = "total-job-search"
JOB_LIST_CLASS_NAME: str = "job-list-default"
JOB_ITEM_CLASS_NAME: str = "job-item-default"
NUM_ITEMS_PER_PAGE: int = 25
MAX_PAGES: int = 10
JOB_DESCRIPTION_CLASS_NAME: str = "job-detail__information-detail--content"
JOB_DESCRIPTION_CLASS_NAME_1: str = "job-description"
JOB_INFO_SECTION_CLASS_NAME: str = "job-detail__info--section-content"
JOB_INFO_SECTION_TITLE_CLASS_NAME: str = "job-detail__info--section-content-title"
JOB_INFO_SECTION_VALUE_CLASS_NAME: str = "job-detail__info--section-content-value"
JOB_INFO_RIGHT_SIDE_CLASS_NAME: str = "box-general-group-info"
JOB_INFO_RIGHT_SIDE_TITLE_CLASS_NAME: str = "box-general-group-info-title"
JOB_INFO_RIGHT_SIDE_VALUE_CLASS_NAME: str = "box-general-group-info-value"
JOB_INFO_CATEGORY_CLASS_NAME: str = "box-category"
JOB_INFO_CATEGORY_TITLE_CLASS_NAME: str = "box-title"
JOB_INFO_CATEGORY_TAG_CLASS_NAME: str = "box-category-tag"
JOB_INFO_DEADLINE_CLASS_NAME: str = "job-detail__info--deadline"
BRAND_JOB_DESCRIPTION_CLASS_NAME: str = "job-data"
BRAND_JOB_ATTR_CLASS_NAME: str = "box-main"
BRAND_JOB_ATTR_CLASS_NAME_1: str = "box-item"
BRAND_JOB_ADDR_CLASS_NAME: str = "box-address"
# FIX: 2023-08-19 topCV is showing a popup that prevents the driver to click on the search button
CLOSE_BUTTON_20230819_CLASS_NAME: str = "close"
CLOSE_MODAL_20230819_CLASS_NAME: str = "modal-content"
# FIX: SORT UP BUTTON on CREATED DATETIME
SORT_U_BUTTON_FOR_NAME: str = "sort-value-new"
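# Illustrative sketch (not part of the original module): a typical end-to-end
# run of the TopCVSeleniumScraper class defined below. The start URL and the
# query are hypothetical examples.
def _example_topcv_run():
    """Scrape one query end-to-end and return the result dict."""
    scraper = TopCVSeleniumScraper(headless=True)
    return scraper.scrape(
        start_url="https://www.topcv.vn/viec-lam",  # hypothetical start URL
        query="data engineer",
        location=None,
        loop_next_page=True,
    )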
class TopCVSeleniumScraper(CommonSeleniumScraper):
def __init__(
self,
binary: str | None = None,
headless: bool = True,
driver_path: str | None = None,
browser_path: str | None = None,
):
super().__init__(binary, headless, driver_path, browser_path)
def _click_sort_button_(self):
"""
        Click the 'sort by newest' toggle (the label for sort-value-new)
:return:
"""
try:
sort_U = self._driver.find_element(
By.XPATH, f"//label[@for='{SORT_U_BUTTON_FOR_NAME}']"
)
sort_U.click()
time.sleep(3)
except Exception as e:
print(e)
def _close_topcv_advertisement_20230819(self):
"""
        Click the close button on advertisement popups
:return:
"""
try:
element = self._driver.find_element(
By.XPATH,
f"//div[contains(@class, '{CLOSE_MODAL_20230819_CLASS_NAME}')]/button[contains(@class,'{CLOSE_BUTTON_20230819_CLASS_NAME}')]",
)
element.click()
except Exception as e:
print("No advertisement found.")
return
def __parse__(self, query: str = None, *args, **kwargs) -> Any:
url = kwargs.get("url", None)
print(f"Visiting {url} ...")
try:
self._driver.get(url)
self._close_topcv_advertisement_20230819()
element = WebDriverWait(self._driver, 60).until(
presence_of_element_located(
(By.XPATH, f"//input[@placeholder='{INPUT_PLACEHOLDER_TEXT}']")
)
)
element.clear()
time.sleep(1)
element.send_keys(query)
element = self._driver.find_element(By.CLASS_NAME, FIND_JOBS_BTN_CLASS_NAME)
element.click()
except Exception as e:
print(e)
# self._close_topcv_advertisement_20230819()
# self._driver.quit()
return None
current_url = self._driver.current_url
items = []
print(f"getting job list ... ")
try:
WebDriverWait(self._driver, 60).until(
presence_of_element_located(
(By.CLASS_NAME, TOTAL_SEARCH_JOB_CLASS_NAME)
)
)
# Scroll down to get all items in this page
for i in range(10):
self._driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight);"
)
num_items = int(
self._driver.find_element(
By.CLASS_NAME, TOTAL_SEARCH_JOB_CLASS_NAME
).text
)
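            # Work out how many result pages to walk, capped at MAX_PAGES;
            # TopCV lists NUM_ITEMS_PER_PAGE jobs per page.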
if num_items > 0:
num_pages = min(
num_items // NUM_ITEMS_PER_PAGE
+ (0 if num_items % NUM_ITEMS_PER_PAGE == 0 else 1),
MAX_PAGES,
)
elements = self._driver.find_elements(
By.XPATH,
f"//div[@class='{JOB_LIST_CLASS_NAME}']/div[contains(@class,'{JOB_ITEM_CLASS_NAME}')]",
)
for element in elements:
html = element.get_attribute("innerHTML")
root = BeautifulSoup(html, features="lxml")
# topcv_job_id = root.get("data-job-id")
company = None
company_url = None
job = None
job_url = None
titles = root.find_all(
lambda tag: tag.name == "h3"
and "title" in tag.get("class", []),
recursive=True,
)
if len(titles) > 0:
job = titles[0].text.strip()
job_urls = titles[0].find_all(lambda tag: tag.name == "a")
if len(job_urls) > 0:
job_url = job_urls[0].get("href")
companies = root.find_all(
lambda tag: tag.name == "a"
and "company" in tag.get("class", []),
recursive=True,
)
if len(companies) > 0:
company = companies[0].get("data-original-title", None)
company_url = companies[0].get("href", None)
if (
company is None
and company_url is None
and job is None
and job_url is None
):
continue
items.append(
{
# "job_id": topcv_job_id,
"company": company,
"company_url": company_url,
"title": job,
"url": job_url,
}
)
for i in range(1, num_pages):
new_url = (
f"{current_url}{'&' if '?' in current_url else '?'}page={i + 1}"
)
self._driver.get(new_url)
WebDriverWait(self._driver, 60).until(
presence_of_element_located(
(By.CLASS_NAME, TOTAL_SEARCH_JOB_CLASS_NAME)
)
)
# Scroll down to get all items in this page
for j in range(10):
self._driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight);"
)
elements = self._driver.find_elements(
By.XPATH,
f"//div[@class='{JOB_LIST_CLASS_NAME}']/div[contains(@class,'{JOB_ITEM_CLASS_NAME}')]",
)
for element in elements:
html = element.get_attribute("innerHTML")
root = BeautifulSoup(html, features="lxml")
# topcv_job_id = root.get("data-job-id")
company = None
company_url = None
job = None
job_url = None
titles = root.find_all(
lambda tag: tag.name == "h3"
and "title" in tag.get("class", []),
recursive=True,
)
if len(titles) > 0:
job = titles[0].text.strip()
job_urls = titles[0].find_all(lambda tag: tag.name == "a")
if len(job_urls) > 0:
job_url = job_urls[0].get("href")
companies = root.find_all(
lambda tag: tag.name == "a"
and "company" in tag.get("class", []),
recursive=True,
)
if len(companies) > 0:
company = companies[0].get("data-original-title", None)
company_url = companies[0].get("href", None)
if (
company is None
and company_url is None
and job is None
and job_url is None
):
continue
items.append(
{
# "job_id": topcv_job_id_job_id,
"company": company,
"company_url": company_url,
"title": job,
"url": job_url,
}
)
except Exception as e:
print(e)
# self._driver.quit()
return None
return items
def _parse_item_(self, item_url: str = None) -> Any:
"""
Parse an item page
:param item_url: the target job page
:return: an object of the job
"""
print(f"Visiting {item_url} ...")
try:
self._driver.get(item_url)
self._close_topcv_advertisement_20230819()
element = WebDriverWait(self._driver, 60).until(
presence_of_element_located(
(
By.XPATH,
f"//div[contains(@class, '{JOB_DESCRIPTION_CLASS_NAME}')]/div[contains(@class, '{JOB_DESCRIPTION_CLASS_NAME_1}')]",
)
)
)
job_description = element.text.strip()
elements = self._driver.find_elements(
By.XPATH,
f"//div[contains(@class, '{JOB_INFO_SECTION_CLASS_NAME}')]",
)
except Exception as e:
print(e)
return None
attributes = {}
for element in elements:
html = element.get_attribute("innerHTML")
root = BeautifulSoup(html, features="lxml")
try:
attribute_names = root.find_all(
lambda tag: tag.name == "div"
and JOB_INFO_SECTION_TITLE_CLASS_NAME in tag.get("class", [])
)
if len(attribute_names) == 0:
continue
attribute_name = attribute_names[0].text.strip()
attribute_data = root.find_all(
lambda tag: tag.name == "div"
and JOB_INFO_SECTION_VALUE_CLASS_NAME in tag.get("class", [])
)
if len(attribute_data) == 0:
continue
attribute = attribute_data[0].text.strip()
attributes[attribute_name] = attribute
except Exception as e:
print(e)
continue
try:
elements = self._driver.find_elements(
By.XPATH,
f"//div[contains(@class, '{JOB_INFO_RIGHT_SIDE_CLASS_NAME}')]",
)
for element in elements:
html = element.get_attribute("innerHTML")
root = BeautifulSoup(html, features="lxml")
try:
attribute_names = root.find_all(
lambda tag: tag.name == "div"
and JOB_INFO_RIGHT_SIDE_TITLE_CLASS_NAME in tag.get("class", [])
)
if len(attribute_names) == 0:
continue
attribute_name = attribute_names[0].text.strip()
attribute_data = root.find_all(
lambda tag: tag.name == "div"
and JOB_INFO_RIGHT_SIDE_VALUE_CLASS_NAME in tag.get("class", [])
)
if len(attribute_data) == 0:
continue
attribute = attribute_data[0].text.strip()
attributes[attribute_name] = attribute
except Exception as e:
print(e)
continue
except Exception as e:
print(e)
try:
elements = self._driver.find_elements(
By.XPATH,
f"//div[contains(@class, '{JOB_INFO_CATEGORY_CLASS_NAME}')]",
)
for element in elements:
html = element.get_attribute("innerHTML")
root = BeautifulSoup(html, features="lxml")
try:
attribute_names = root.find_all(
lambda tag: tag.name == "div"
and JOB_INFO_CATEGORY_TITLE_CLASS_NAME in tag.get("class", [])
)
if len(attribute_names) == 0:
continue
attribute_name = attribute_names[0].text.strip()
tags = [
t.text.strip()
for t in root.find_all(
lambda tag: tag.name == "a"
and JOB_INFO_CATEGORY_TAG_CLASS_NAME in tag.get("class", [])
)
]
attributes[attribute_name] = tags
except Exception as e:
print(e)
continue
except Exception as e:
print(e)
deadline = None
try:
element = self._driver.find_element(
By.CLASS_NAME, JOB_INFO_DEADLINE_CLASS_NAME
)
deadline = element.text.replace("Hạn nộp hồ sơ: ", "").strip()
except Exception as e:
print(e)
return {
"description": job_description,
"attributes": attributes,
"deadline": deadline,
}
def _parse_brand_item_(self, item_url: str = None) -> Any:
"""
Parse a brand job page
        :param item_url: the target brand job page
:return: an object of the job
"""
print(f"Visiting {item_url} ...")
try:
self._driver.get(item_url)
self._close_topcv_advertisement_20230819()
element = WebDriverWait(self._driver, 60).until(
presence_of_element_located(
(
By.XPATH,
f"//div[contains(@class, '{BRAND_JOB_DESCRIPTION_CLASS_NAME}')]",
)
)
)
job_description = element.text.strip()
elements = self._driver.find_elements(
By.XPATH,
f"//div[contains(@class, '{BRAND_JOB_ATTR_CLASS_NAME}')]/div[contains(@class, '{BRAND_JOB_ATTR_CLASS_NAME_1}')]",
)
except Exception as e:
print(e)
return None
attributes = {}
for element in elements:
html = element.get_attribute("innerHTML")
root = BeautifulSoup(html, features="lxml")
try:
attribute_name = root.find_all(lambda tag: tag.name == "strong")[
0
].text.strip()
attribute = root.find_all(lambda tag: tag.name == "span")[
0
].text.strip()
attributes[attribute_name] = attribute
except Exception as e:
print(e)
continue
try:
elements = self._driver.find_elements(
By.XPATH,
f"//div[contains(@class, '{BRAND_JOB_ADDR_CLASS_NAME}')]",
)
for element in elements:
html = element.get_attribute("innerHTML")
root = BeautifulSoup(html, features="lxml")
try:
attribute_name = root.find_all(lambda tag: tag.name == "p")[
0
].text.strip()
attribute = root.find_all(lambda tag: tag.name == "div")[
0
].text.strip()
attributes[attribute_name] = attribute
except Exception as e:
print(e)
continue
except Exception as e:
print(e)
return {"description": job_description, "attributes": attributes}
def scrape(
self,
start_url: str,
query: str,
location: str | None,
loop_next_page: bool = True,
) -> Any:
        jobs = self.__parse__(query=query, url=start_url)
        job_dict = {}
        if jobs is not None:
            for job in jobs:
                if job["url"] not in job_dict:
                    job_dict[job["url"]] = job
        items = list(job_dict.values())
print(f"JOBS: {len(items)}")
# pprint.pprint(items)
result = []
if items is not None:
for item in items:
job_url = item["url"]
if "/brand/" not in job_url:
extra_info = self._parse_item_(item_url=job_url)
else:
extra_info = self._parse_brand_item_(item_url=job_url)
result.append({"basic": item, "extra": extra_info})
return {
"total": len(result),
"query": query,
"location": None,
"dataList": result,
}
def __str__(self):
return "TopCV Selenium Scrapper" | AIJobs-Batch | /AIJobs_Batch-1.0.0a1-py3.8-py3-any.whl/aijobs/data/topcv/selenium_scrapers.py | selenium_scrapers.py |
import json
import re
import time
from typing import Any, Tuple
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.support.wait import WebDriverWait
from aijobs.data.common_scraper import CommonSeleniumScraper
__all__ = ["IndeedSeleniumScraper"]
from aijobs.utils.helpers import Helper
JOB_COUNTER_CLASS_NAME: str = "jobsearch-JobCountAndSortPane-jobCount"
FIND_JOBS_BTN_CLASS_NAME: str = "yosegi-InlineWhatWhere-primaryButton"
JOB_PATTERNS: str = (
r'window.mosaic.providerData\["mosaic-provider-jobcards"\]=(\{.+?\});'
)
DEFAULT_LOCATION: str = "Hanoi, Vietnam"
ITEMS_PER_PAGE: int = 10
MAX_PAGES: int = 50
WHAT_INPUT_ID: str = "text-input-what"
WHERE_INPUT_ID: str = "text-input-where"
def __parse_html_source__(html: str):
html_data = re.findall(JOB_PATTERNS, html)
if len(html_data) == 0:
return None
html_data = json.loads(html_data[0])
return html_data["metaData"]["mosaicProviderJobCardsModel"]["results"]
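# Illustrative sketch (not part of the original module): pulling the mosaic
# job-card payload out of an Indeed results page that was saved to disk.
# The filename below is a hypothetical example.
def _example_parse_saved_page(path: str = "indeed_results.html") -> list:
    """Return the job-card dicts embedded in a saved results page."""
    with open(path, "r", encoding="utf-8") as f:
        html = f.read()
    cards = __parse_html_source__(html)
    # Each card is a dict; 'jobkey' and 'viewJobLink' are the fields the
    # scraper below relies on.
    return cards if cards is not None else []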
class IndeedSeleniumScraper(CommonSeleniumScraper):
def __init__(
self,
binary: str | None = None,
headless: bool = True,
driver_path: str | None = None,
browser_path: str | None = None,
) -> None:
super().__init__(binary, headless, driver_path, browser_path)
def _bypass_cloudflare_check(self):
"""
Bypass the check screen of Cloudflare
:return:
"""
try:
element = self._driver.find_element(
By.XPATH, f"//label[@class='ctp-checkbox-label']"
)
element.click()
except Exception as e:
print(e)
def __parse__(
self, query: str = None, location: str = None, *args, **kwargs
) -> Any:
        url = kwargs.get("url", None)
        print(f"Visiting {url} ...")
        job_count = 0
        timed_out = False
        try:
            self._driver.get(url)
            self._bypass_cloudflare_check()
# find the total number of jobs
element = WebDriverWait(self._driver, 60).until(
presence_of_element_located((By.CLASS_NAME, JOB_COUNTER_CLASS_NAME))
)
job_count = int(element.text.split()[0].strip())
except Exception as e:
print(e)
timed_out = True
if timed_out:
time.sleep(1)
try:
element = self._driver.find_element(
By.XPATH, f"//input[@id='{WHAT_INPUT_ID}']"
)
element.click()
element.clear()
time.sleep(1)
element.send_keys(query)
# element = self._driver.find_element(
# By.XPATH, f"//input[@id='{WHERE_INPUT_ID}']"
# )
# element.click()
# element.clear()
# time.sleep(1)
# element.send_keys(location)
element = self._driver.find_element(
By.CLASS_NAME, FIND_JOBS_BTN_CLASS_NAME
)
element.click()
# find the total number of jobs
element = WebDriverWait(self._driver, 6).until(
presence_of_element_located((By.CLASS_NAME, JOB_COUNTER_CLASS_NAME))
)
job_count = int(element.text.split()[0].strip())
except Exception as e:
print(e)
# self._driver.close()
return None
# get data from html
html = self._driver.page_source
try:
data = __parse_html_source__(html)
except Exception as e:
print(e)
# self._driver.close()
return None
return {"total": job_count, "data": data}
def _parse_job_(self, view_url: str) -> Tuple[str | None, str | None]:
"""
Parse a single job page
:param view_url: the target job page
:return: the title and description as a tuple
"""
try:
self._driver.get(view_url)
self._bypass_cloudflare_check()
element = WebDriverWait(self._driver, 6).until(
presence_of_element_located(
(By.XPATH, "//div[@id='jobDescriptionText']")
)
)
description = element.text
element = self._driver.find_element(
By.XPATH, "//div[@id='jobDescriptionTitle']"
)
title = element.text
return title, description
except Exception as e:
print(e)
return None, None
def scrape(
self,
start_url: str,
query: str,
location: str | None,
loop_next_page: bool = True,
) -> Any:
page_count = 0
url = (
f"{start_url}/?q={query.replace(' ', '+')}&sort=date&l="
f"{location.replace(' ', '+') if location is not None else DEFAULT_LOCATION}"
)
# page 0
page0 = self.__parse__(url=url, query=query, location=location)
if page0 is None:
return None
total_count = page0["total"]
        # number of result pages to walk, capped at MAX_PAGES
        pages = min(
            total_count // ITEMS_PER_PAGE
            + (1 if total_count % ITEMS_PER_PAGE > 0 else 0),
            MAX_PAGES,
        )
result = {"total": total_count, "dataList": page0["data"]}
if loop_next_page:
updated_url = self._driver.current_url
while page_count < pages:
page_count += 1
page = self.__parse__(
url=f"{updated_url}&start={page_count * ITEMS_PER_PAGE}",
query=query,
location=location,
)
if page is None:
break
result["dataList"].extend(page["data"])
data_list = result["dataList"]
data_set = {}
for d in data_list:
if d["jobkey"] in data_set:
continue
data_set[d["jobkey"]] = d
for k in data_set:
d = data_set[k]
if "viewJobLink" not in d:
data_set[k]["extra"] = None
continue
job_url = start_url + d["viewJobLink"]
print(f"JOB URL: {job_url}")
title, description = self._parse_job_(view_url=job_url)
data_set[k]["extra"] = {"title": title, "description": description}
result["dataList"] = dict(data_set)
result["total"] = len(data_set)
result["query"] = query
result["location"] = None # location
# self._driver.close()
return result
def __str__(self):
return "Indeed Selenium Scraper" | AIJobs-Batch | /AIJobs_Batch-1.0.0a1-py3.8-py3-any.whl/aijobs/data/indeed/selenium_scrapers.py | selenium_scrapers.py |
AIKIF - Artificial Intelligence Knowledge Information Framework
This is an information classification framework that maps structured or freeform data to
a standard knowledge store.
It manages a dataset of your applications - the source data, parameters, runs and results - and then uses your business rules to convert and store the information in a machine-usable format.
Your AI software can link to AIKIF by setting up logging watch-points that define success /
failure along with the range of input parameters. Goals and plans are defined by breaking
them down into smaller tasks until each task can be run by a tool in the Toolbox.
A tool is any Python-wrapped function or application, so the Toolbox is easily extensible.
Quick Start
===========
>> pip install aikif
or get latest info from https://github.com/acutesoftware/AIKIF
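Example
=======
A minimal logging sketch (assuming cls_log is importable as aikif.cls_log, as
in the source tree) - your application records its own watch-points like this:

    import aikif.cls_log as cls_log
    lg = cls_log.Log('/path/to/your/AIKIF/log')    # any writable folder
    lg.record_process('my_app.py', 'started nightly data load')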
| AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/README.txt | README.txt |
import os
core_data_types = [ 'Character', # Who
'Object', # What
'Location', # Where
'Event', # When
'Process', # How
'Fact' # Why
]
core_process = [ 'List',
]
class CoreData(object):
"""
Base class for all core data objects
"""
def __init__(self, name, data=None, parent=None):
"""
define an object with a name
"""
self.name = name
self.data = data # can be as detailed or simple as needed
self.parent = parent
self.child_nodes = []
self.links = []
self.data_type = ''
self.type_desc = ''
def __str__(self):
if self.data_type == '':
return self.name
else:
return self.name + ' (type=' + self.data_type + ')'
def format_csv(self, delim=',', qu='"'):
"""
Prepares the data in CSV format
"""
res = qu + self.name + qu + delim
if self.data:
for d in self.data:
res += qu + str(d) + qu + delim
return res + '\n'
def format_dict(self, delim=':', qu="'"):
"""
        Prepares the data as a dictionary-style string with column headers
TODO - get variable names of data[] as strings for hdr
"""
res = 'name' + delim + qu + self.name + qu + ','
for num, d in enumerate(self.data):
res += 'col' + str(num) + delim + qu + str(d) + qu + ','
return res
def format_all(self):
"""
        return a trace of parents and children of the object
"""
res = '\n--- Format all : ' + str(self.name) + ' -------------\n'
res += ' parent = ' + str(self.parent) + '\n'
res += self._get_all_children()
res += self._get_links()
return res
def _get_all_children(self,):
"""
return the list of children of a node
"""
res = ''
if self.child_nodes:
for c in self.child_nodes:
res += ' child = ' + str(c) + '\n'
if c.child_nodes:
for grandchild in c.child_nodes:
res += ' child = ' + str(grandchild) + '\n'
else:
res += ' child = None\n'
return res
def _get_links(self,):
"""
return the list of links of a node
"""
res = ''
if self.links:
for l in self.links:
res += ' links = ' + str(l[0]) + '\n'
if l[0].child_nodes:
for chld in l[0].child_nodes:
res += ' child = ' + str(chld) + '\n'
if l[0].links:
for lnk in l[0].links:
res += ' sublink = ' + str(lnk[0]) + '\n'
else:
res += ' links = None\n'
return res
def drill_down(self):
"""
this WALKS down the tree to get the LIST of
        nodes at the detail level (see expand() to actually
        add the list of nodes)
TODO = processes need to be recalculated
"""
return self.child_nodes
def drill_up(self):
"""
        returns the parent node - opposite of drill_down
TODO = processes need to be recalculated
"""
return self.parent
def expand(self, process, child_nodes):
"""
this expands a current node by defining ALL the
children for that process
TODO = processes need to be recalculated
"""
#print('TODO: process check = ', process)
#print(self.name, ' expanded to ->', child_nodes)
self.child_nodes = []
for n in child_nodes:
self.child_nodes.append(CoreData(n, parent=self))
def contract(self, process):
"""
this contracts the current node to its parent and
        then either calculates the params and values if all
child data exists, OR uses the default parent data.
(In real terms it returns the parent and recalculates)
TODO = processes need to be recalculated
"""
print('TODO: process check = ', process)
print(self.name, ' contracted to ->', self.parent)
return self.parent
def get_child_by_name(self, name):
"""
find the child object by name and return the object
"""
for c in self.child_nodes:
if c.name == name:
return c
return None
def links_to(self, other, tpe):
"""
adds a link from this thing to other thing
using type (is_a, has_a, uses, contains, part_of)
"""
if self.check_type(tpe):
self.links.append([other, tpe])
else:
raise Exception('aikif.core_data cannot process this object type')
def check_type(self, tpe):
"""
        TODO - review this check
        returns True if tpe is one of the valid core_data_types,
        otherwise False
"""
for v in core_data_types:
if tpe == v:
return True
return False
class CoreDataWho(CoreData):
def __init__(self, name, data=None, parent=None):
"""
WHO
Characters are physical people, or software agents
data = ['Name', 'Phys|Virt', rights]
"""
CoreData.__init__(self, name, data, parent)
self.data_type = 'who'
self.type_desc = 'Character'
def __str__(self):
return CoreData.__str__(self)
class CoreDataWhat(CoreData):
def __init__(self, name, data=None, parent=None):
"""
WHAT
        Objects currently don't have any additional
data properties
"""
CoreData.__init__(self, name, data, parent)
self.data_type = 'what'
self.type_desc = 'Object'
def __str__(self):
return CoreData.__str__(self)
class CoreDataWhere(CoreData):
def __init__(self, name, data=None, parent=None):
"""
WHERE
Locations are physical or virtual places
data = ['Name', 'Phys|Virt', 'Location']
"""
CoreData.__init__(self, name, data, parent)
self.data_type = 'where'
self.type_desc = 'Location'
def __str__(self):
return CoreData.__str__(self)
class CoreDataWhen(CoreData):
def __init__(self, name, data=None, parent=None):
"""
WHEN
Events have a simple data structure
date, category, remind_time, event
"""
#data = [date, category, details]
CoreData.__init__(self, name, data, parent)
self.data_type = 'when'
self.type_desc = 'Event'
def __str__(self):
return CoreData.__str__(self)
class CoreDataHow(CoreData):
def __init__(self, name, data=None, parent=None):
"""
HOW
Processes are the act of doing things physically
such as 'build a boat', 'hammer a nail', 'go to shops',
or software processes (BAT runs , etc)
data = ['Name', 'Phys|Virt', start_name]
"""
CoreData.__init__(self, name, data, parent)
self.data_type = 'how'
self.type_desc = 'Process'
def __str__(self):
return CoreData.__str__(self)
class CoreDataWhy(CoreData):
def __init__(self, name, data=None, parent=None):
"""
WHY
Facts are all other information or generally LINKS
between other types, eg
John ate an Apple => Who.John How.Ate What.Apple
Mars is heavy => What.Mars Process.PropertyEquals 'heavy' <--- how to classify this
data = ['Name', 'Phys|Virt', start_name]
"""
CoreData.__init__(self, name, data, parent)
self.data_type = 'why'
self.type_desc = 'Fact'
def __str__(self):
return CoreData.__str__(self)
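# Illustrative sketch (not part of the original module): building the
# "John ate an Apple => Who.John How.Ate What.Apple" fact described in
# CoreDataWhy's docstring and linking the pieces together. The data values
# are hypothetical examples.
def _example_core_fact():
    """Return a formatted trace of a small who/how/what fact."""
    who = CoreDataWho('John', data=['John', 'Phys', 'all'])
    what = CoreDataWhat('Apple')
    how = CoreDataHow('Ate', data=['Ate', 'Phys', 'John'])
    fact = CoreDataWhy('John ate an Apple')
    fact.links_to(who, 'Character')   # link types must appear in core_data_types
    fact.links_to(how, 'Process')
    fact.links_to(what, 'Object')
    return fact.format_all()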
class CoreTable(object):
"""
Class to manage the collection of multiple CoreData
objects. Keeps everything as a list of objects such
as Events, Locations, Objects, etc and handles the
saving, loading and searching of information.
"""
def __init__(self, fldr, tpe, user, header):
self.type = tpe
self.user = user
self.fldr = fldr
self.table = [] # list of data - eg events, locations, etc
self.header = header # mod_core.Event('Name', 'Date', 'Journal', 'Details')
def __str__(self):
res = ''
res += ' type = ' + self.type + '\n'
res += ' user = ' + self.user + '\n'
res += ' fldr = ' + self.fldr + '\n'
for e in self.table:
res += e.format_csv()
return res
def get_filename(self, year):
"""
returns the filename
"""
res = self.fldr + os.sep + self.type + year + '.' + self.user
return res
def add(self, e):
self.table.append(e)
def find(self, txt):
result = []
for e in self.table:
print('find(self, txt) e = ', e)
if txt in str(e):
result.append(e)
#print(e)
return result
def save(self, file_tag='2016', add_header='N'):
"""
save table to folder in appropriate files
NOTE - ONLY APPEND AT THIS STAGE - THEN USE DATABASE
"""
fname = self.get_filename(file_tag)
with open(fname, 'a') as f:
if add_header == 'Y':
f.write(self.format_hdr())
for e in self.table:
f.write(e.format_csv())
def format_hdr(self, delim=',', qu='"'):
"""
Prepares the header in CSV format
"""
res = ''
if self.header:
for d in self.header:
res += qu + str(d) + qu + delim
return res + '\n'
def generate_diary(self):
"""
extracts event information from core tables into diary files
"""
print('Generate diary files from Event rows only')
for r in self.table:
print(str(type(r)) + ' = ', r) | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/core_data.py | core_data.py |
import os
import sys
fldrs = {}
logs = {}
params = {}
"""
# path for personal data location (TODO - you need to modify this line below!)
if sys.platform == 'linux':
if os.path.exists('/home/duncan'):
hme = '/home/duncan/'
core_folder = '/home/duncan/dev/src/python/AIKIF'
print('config.py : running locally on duncans PC!')
else:
hme = os.getcwd()
core_folder = os.getcwd()
print('config.py : running on CI build!')
else:
hme = 'T:\\user\\'
core_folder = 'T:\\user\\dev\\src\\python\\AIKIF'
"""
def get_root_folder():
"""
returns the home folder and program root depending on OS
"""
locations = {
'linux':{'hme':'/home/duncan/', 'core_folder':'/home/duncan/dev/src/python/AIKIF'},
'win32':{'hme':'T:\\user\\', 'core_folder':'T:\\user\\dev\\src\\python\\AIKIF'},
'cygwin':{'hme':os.getcwd() + os.sep, 'core_folder':os.getcwd()},
'darwin':{'hme':os.getcwd() + os.sep, 'core_folder':os.getcwd()}
}
hme = locations[sys.platform]['hme']
core_folder = locations[sys.platform]['core_folder']
if not os.path.exists(core_folder):
hme = os.getcwd()
core_folder = os.getcwd()
print('config.py : running on CI build (or you need to modify the paths in config.py)')
return hme, core_folder
hme, core_folder = get_root_folder()
fldrs['localPath'] = hme + 'AIKIF' + os.sep
fldrs['log_folder'] = hme + 'AIKIF' + os.sep + 'log'
fldrs['pers_data'] = hme + 'AIKIF' + os.sep + 'pers_data'
fldrs['pers_credentials'] = hme + 'AIKIF' + os.sep + 'pers_data' + os.sep + 'credentials'
# FOR DEVELOPMENT
core_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." )
fldrs['root_path'] = core_folder
fldrs['public_data_path'] = core_folder + os.sep + 'aikif' + os.sep + 'data'
fldrs['program_path'] = os.path.abspath(core_folder + os.sep + 'aikif')
# user defined parameters
params['AIKIF_version'] = '0.1.9'
params['AIKIF_deploy'] = 'DEV'
# names of logfiles for AIKIF
logs['logFileProcess'] = fldrs['localPath'] + 'log' + os.sep + 'process.log'
logs['logFileSource'] = fldrs['localPath'] + 'log' + os.sep + 'source.log'
logs['logFileCommand'] = fldrs['localPath'] + 'log' + os.sep + 'command.log'
logs['logFileResult'] = fldrs['localPath'] + 'log' + os.sep + 'result.log'
# index files
# fldrs['public_data_path'] + os.sep + 'index' + os.sep + 'ndxWordsToFilesLecture.txt',
# fldrs['localPath'] + 'diary' + os.sep + 'filelister2014.csv',
params['index_files'] = [fldrs['public_data_path'] + os.sep + 'index' + os.sep + 'ndxAll.txt',
fldrs['localPath'] + 'pers_data' + os.sep + 'pers_index_final.txt',
fldrs['localPath'] + 'pers_data' + os.sep + 'ndx_PCusage.txt'
]
def read_credentials(fname):
"""
read a simple text file from a private location to get
username and password
"""
with open(fname, 'r') as f:
username = f.readline().strip('\n')
password = f.readline().strip('\n')
return username, password
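# Illustrative sketch (not part of the original module): read_credentials()
# expects a two-line plain-text file - username on the first line, password on
# the second - assumed to live under fldrs['pers_credentials']. The filename
# below is a hypothetical example.
def _example_read_credentials():
    """Hypothetical helper showing how read_credentials would be called."""
    fname = fldrs['pers_credentials'] + os.sep + 'example_service.txt'
    username, password = read_credentials(fname)
    return username, password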
def show_config():
"""
    returns a formatted string of the current folder locations,
    logfiles and parameters. This module is intended to be imported
    by most AIKIF utils to manage folder paths, user settings, etc.
    Modify the parameters at the top of this file to suit
"""
res = ''
res += '\n---------- Folder Locations ---------\n'
for k,v in fldrs.items():
res += str(k) + ' = ' + str(v) + '\n'
res += '\n---------- Logfiles ---------\n'
for k,v in logs.items():
res += str(k) + ' = ' + str(v) + '\n'
res += '\n---------- Parameters ---------\n'
for k,v in params.items():
res += str(k) + ' = ' + str(v) + '\n'
print("\nusage from other programs - returns " + fldr_root())
return res
def fldr_root():
return fldrs['root_path']
if __name__ == '__main__':
show_config() | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/config.py | config.py |
import os
#import logging
#logging.basicConfig(filename='test_bias.log',level=logging.DEBUG,format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
import cls_log
from decorators import debug
root_fldr = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." )
bias_files = [ 'bias-website.csv',
'bias-source-type.csv',
'bias-person-reputation.csv',
'bias-person-relationship.csv',
'bias-collection-method.csv',
]
sample_metadata = [
{'label':'collection-method', 'value': 'website'},
{'label':'website', 'value': 'reddit.com'},
{'label':'person', 'value': 'acutesoftware'},
{'label':'reputation', 'value': 'logged in user'},
{'label':'time', 'value': 'recent'},
{'label':'relationship', 'value': 'self'},
{'label':'source-type', 'value': 'comment'},
]
sample_rawdata = { 'metadata': sample_metadata,
'data':'You should develop in Python 3 instead of 2 for new projects unless you have a dependant package that only works on version 2'}
lg = cls_log.Log(os.getcwd())
lg.record_process('bias.py', 'starting bias.py')
class Bias(object):
"""
class to give a rough weighting to a piece of information
based on source, time, context, etc.
This is used when automatically parsing data to ensure that
a random comment on a forum does not get equal weighting to
a peer reviewed academic paper.
There can be multiple biases, and each user can modify the
weights to what they deem accurate for their situation.
    Parameters:
    metadata = list of {'label': ..., 'value': ...} dicts, all optional.
    This comes from the 'data' metadata and can contain
    website, source-type, person-reputation, person-relationship,
    collection-method
Public functions:
get_bias_rating() = returns the bias rating 0=bullshit -> 1=fact
"""
#@debug
def __init__(self, metadata):
"""
passes all data on command line leave as empty string for blank
"""
self.metadata = metadata
self.bias_rating = 1 # everything starts unbiased
lg.record_process('bias.py', 'init bias class')
self.bias_details = []
for f in bias_files:
self._read_bias_rating(f)
self._calculate_bias()
lg.record_process('bias.py', 'bias rating = ' + str(self.bias_rating) + ' for ' + str(self.metadata))
def __str__(self):
"""
returns a string of basic inputs and outputs
"""
res = 'Bias\n'
for m in self.metadata:
res += m['label'] + ' = ' + m['value'] + '\n'
res += 'BIAS Rating = ' + str(self.bias_rating) + '\n'
return res
def get_bias_details(self):
"""
returns a string representation of the bias details
"""
res = 'Bias File Details\n'
for b in self.bias_details:
if len(b) > 2:
res += b[0].ljust(35)
res += b[1].ljust(35)
res += b[2].ljust(9)
res += '\n'
return res
def _calculate_bias(self):
"""
        updates self.bias_rating based on the metadata sources.
        Because multiplying fractions gives very small numbers,
        0.5 is added to each factor, so each multiplier falls in
        the range 0.5 -> 1.5 (still testing)
"""
for m in self.metadata:
for b in self.bias_details:
if b[0] == 'bias-' + m['label'] + '.csv':
l_bias = 1.000
try:
l_bias = float(b[2]) + 0.5
                    except (ValueError, TypeError):
lg.record_process('bias.py','ERROR converting bias value to float: ' + str(b))
self.bias_rating *= l_bias
#@debug
def _read_bias_rating(self, short_filename):
"""
        read the bias reference file given by short_filename
        and append its rows to self.bias_details
"""
        full_name = os.path.join(root_fldr, 'aikif', 'data', 'ref', short_filename)
        lg.record_process('bias.py','reading ' + full_name)
        with open(full_name, 'r') as f:
            for line in f:
                if line.strip() == '':
                    break
                bias_line = [short_filename]
                cols = line.split(',')
                for col in cols:
                    # strip whitespace/newline first so trailing quotes are removed too
                    bias_line.append(col.strip().strip('"'))
                self.bias_details.append(bias_line)
def get_bias_rating(self):
return self.bias_rating
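# Illustrative usage sketch (not part of the original module): score the
# sample metadata defined above and show the per-file bias details. Assumes
# the bias-*.csv reference files exist under aikif/data/ref.
def _demo_bias_rating(metadata=None):
    """Build a Bias object and return its rating; defaults to sample_metadata."""
    b = Bias(metadata if metadata is not None else sample_metadata)
    print(b)                      # labels, values and the final BIAS Rating
    print(b.get_bias_details())   # per reference-file rows that fed the rating
    return b.get_bias_rating()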
class Controversy(object):
"""
class to handle and report on controversial topics so
that it can be added as a value to bias calculations.
The outcome is a value from:
0 (universally agreed) to
1 (everyone argues about it)
There is also a 'noise' rating for a topic, also a value
0 = no one is talking about it
1 = everyone talks about it
Controversies should be time stamped so that the values
can be factored accordingly.
"""
def __init__(self, topic):
"""
testing reading noise by topic from external file
{
'topic' : ['maths', 'physics', 'economics', 'politics', 'religion'],
'controversy': ['0.01', '0.012', '0.4', '0.85', '0.95'],
'noise' : ['0.1', '0.2', '0.3', '0.9', '0.9']
}
        Once all topics are loaded, the key 'topic' passed on initialisation
is used to find the controversy and noise
"""
self.topic = topic
self.topics = []
import csv
full_name = os.path.join(root_fldr, 'aikif', 'data', 'ref', 'bias_by_topic.csv')
with open(full_name, 'r') as fin:
reader = csv.reader(fin)
for num, r in enumerate(reader):
self.topics.append({'name':r[0], 'controversy':float(r[1]), 'noise':float(r[2])})
self.controversy = self.get_controversy('controversy')
self.noise = self.get_controversy('noise')
def __str__(self):
res = 'Controversy: '
res += self.topic
res += ' controversy=' + str(self.controversy) + ' noise=' + str(self.noise) + '\n'
return res
def get_controversy(self, item='controversy'):
for t in self.topics:
if t['name'] == self.topic:
return t[item]
return 0 | AIKIF | /AIKIF-0.2.2.tar.gz/AIKIF-0.2.2/aikif/bias.py | bias.py |
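if __name__ == '__main__':
    # Illustrative run (not part of the original module): weight a topic using
    # aikif/data/ref/bias_by_topic.csv - 'politics' is one of the sample topics
    # shown in the Controversy docstring above.
    c = Controversy('politics')
    print(c)
    print('noise for politics =', c.get_controversy('noise'))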