Code dump (one file per record; columns: code | package | path | filename) covering the AI-Jarvis and AI-Starter Python packages.
import pyaudio
import struct
import math

INITIAL_TAP_THRESHOLD = 0.1  # 0.01 to 1.5
FORMAT = pyaudio.paInt16
SHORT_NORMALIZE = (1.0 / 32768.0)
CHANNELS = 2
RATE = 44100
INPUT_BLOCK_TIME = 0.05
INPUT_FRAMES_PER_BLOCK = int(RATE * INPUT_BLOCK_TIME)
OVERSENSITIVE = 15.0 / INPUT_BLOCK_TIME
UNDERSENSITIVE = 120.0 / INPUT_BLOCK_TIME
MAX_TAP_BLOCKS = 0.15 / INPUT_BLOCK_TIME

def get_rms(block):
    count = len(block) / 2
    format = "%dh" % (count)
    shorts = struct.unpack(format, block)
    sum_squares = 0.0
    for sample in shorts:
        n = sample * SHORT_NORMALIZE
        sum_squares += n * n
    return math.sqrt(sum_squares / count)

class TapTester(object):
    def __init__(self):
        self.pa = pyaudio.PyAudio()
        self.stream = self.open_mic_stream()
        self.tap_threshold = INITIAL_TAP_THRESHOLD
        self.noisycount = MAX_TAP_BLOCKS + 1
        self.quietcount = 0
        self.errorcount = 0

    def stop(self):
        self.stream.close()

    def find_input_device(self):
        device_index = None
        for i in range(self.pa.get_device_count()):
            devinfo = self.pa.get_device_info_by_index(i)
            # print( "Device %d: %s"%(i,devinfo["name"]) )
            for keyword in ["mic", "input"]:
                if keyword in devinfo["name"].lower():
                    # print( "Found an input: device %d - %s"%(i,devinfo["name"]) )
                    device_index = i
                    return device_index
        if device_index is None:
            print("No preferred input found; using default input device.")
        return device_index

    def open_mic_stream(self):
        device_index = self.find_input_device()
        stream = self.pa.open(format=FORMAT,
                              channels=CHANNELS,
                              rate=RATE,
                              input=True,
                              input_device_index=device_index,
                              frames_per_buffer=INPUT_FRAMES_PER_BLOCK)
        return stream

    def listen(self):
        try:
            block = self.stream.read(INPUT_FRAMES_PER_BLOCK)
        except IOError as e:
            self.errorcount += 1
            print("(%d) Error recording: %s" % (self.errorcount, e))
            self.noisycount = 1
            return
        amplitude = get_rms(block)
        if amplitude > self.tap_threshold:
            self.quietcount = 0
            self.noisycount += 1
            if self.noisycount > OVERSENSITIVE:
                self.tap_threshold *= 1.1
        else:
            if 1 <= self.noisycount <= MAX_TAP_BLOCKS:
                return "True-Mic"
            self.noisycount = 0
            self.quietcount += 1
            if self.quietcount > UNDERSENSITIVE:
                self.tap_threshold *= 2

def Tester():
    tt = TapTester()
    while True:
        kk = tt.listen()
        if "True-Mic" == kk:
            print("")
            print("> Clap Detected: Starting The Jarvis.")
            print("")
            return "True-Mic"
| AI-Jarvis | /AI_Jarvis-0.0.3-py3-none-any.whl/AI_Jarvis/Clap.py | Clap.py |

<img src="https://raw.githubusercontent.com/npulagam/AI-Starter/master/Industrialized_AI_Animation.gif" height="500" width="900" />
# DXC Industrialized AI Starter
DXC Industrialized AI Starter makes it easy for you to deploy (industrialize) your AI algorithms. If you are a data scientist working on an algorithm that you would like to deploy across the enterprise, DXC's Industrialized AI Starter makes it easier for you to:
- Access, clean, and explore raw data
- Build data pipelines
- Run AI experiments
- Publish microservices
## Installation
To install and import the DXC AI Starter library, use the following:
```
pip install DXC-Industrialized-AI-Starter
```
```python
from dxc import ai
```
## Getting Started
### Access, Clean, and Explore Raw Data
Use the library to access, clean, and explore your raw data.
``` python
#Access raw data
df = ai.read_data_frame_from_remote_json(json_url)
df = ai.read_data_frame_from_remote_csv(csv_url)
df = ai.read_data_frame_from_local_json()
df = ai.read_data_frame_from_local_csv()
df = ai.read_data_frame_from_local_excel_file()
#Clean data: Imputes missing data, removes empty rows and columns, anonymizes text.
raw_data = ai.clean_dataframe(df)
#Explore the complete data as an interactive HTML report
report = ai.explore_complete_data(df)
report.to_notebook_iframe()
#Explore raw data:
ai.visualize_missing_data(raw_data) #creates a visual display of missing data.
ai.explore_features(raw_data) #visualizes pairwise relationships between all features in the data.
ai.plot_distributions(raw_data) #creates a distribution graph for each column.
```
[Click here](https://dxc-technology.github.io/DXC-Industrialized-AI-Starter/access_clean/) for details about accessing, cleaning, and exploring raw data.
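The object returned by `explore_complete_data` is a pandas-profiling `ProfileReport`, so (as a rough sketch, not an official library API) it can typically also be written out as a standalone HTML file:
```python
report = ai.explore_complete_data(raw_data, title = "Data Report")
report.to_file("data_report.html") #saves the interactive report as a standalone HTML page
```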
### Build Data Pipelines
Pipelines are a standard way to process your data toward modeling and interpretation. By default, the DXC AI Starter library uses the free tier of [MongoDB Atlas](https://account.mongodb.com/account/register) to store raw data and execute pipelines. To get started, first sign up for a free <a href="https://account.mongodb.com/account/register" target="_blank">MongoDB</a> account, create a database, and obtain its connection string; then specify those details in the `data_layer` dictionary below. The following code connects to MongoDB and stores raw data for processing.
```python
#Insert data into MongoDB:
data_layer = {
"connection_string": "<your connection_string>",
"collection_name": "<your collection_name>",
"database_name": "<your database_name>",
"data_source":"<Source of your datset>",
"cleaner":"<whether applied cleaner yes/no >"
}
wrt_raw_data = ai.write_raw_data(data_layer, raw_data, date_fields = [])
```
Once raw data is stored, you can run pipelines to transform the data. This code instructs the data store on how to refine the raw data into something that can be used to train a machine-learning model. Please refer to the syntax of [MongoDB pipelines](https://docs.mongodb.com/manual/core/aggregation-pipeline/) for details on how to write a pipeline. Below is an example of creating and executing a pipeline.
```python
pipeline = [
{
'$group':{
'_id': {
"funding_source":"$funding_source",
"request_type":"$request_type",
"department_name":"$department_name",
"replacement_body_style":"$replacement_body_style",
"equipment_class":"$equipment_class",
"replacement_make":"$replacement_make",
"replacement_model":"$replacement_model",
"procurement_plan":"$procurement_plan"
},
"avg_est_unit_cost":{"$avg":"$est_unit_cost"},
"avg_est_unit_cost_error":{"$avg":{ "$subtract": [ "$est_unit_cost", "$actual_unit_cost" ] }}
}
}
]
df = ai.access_data_from_pipeline(wrt_raw_data, pipeline) #refined data will be stored in pandas dataframe.
```
<a href= "https://dxc-technology.github.io/DXC-Industrialized-AI-Starter/data_pipeline/" target="_blank">Click here</a> for details about building data pipeline.
### Run AI Experiments
Use the DXC AI Starter to build and test algorithms. This code executes an experiment by running run_experiment() on an experiment design.
```python
experiment_design = {
#model options include ['tpot_regression()', 'tpot_classification()', 'timeseries']
"model": ai.tpot_regression(),
"labels": df.avg_est_unit_cost_error,
"data": df,
#Tell the model which column is 'output'
#Also note columns that aren't purely numerical
#Examples include ['nlp', 'date', 'categorical', 'ignore']
"meta_data": {
"avg_est_unit_cost_error": "output",
"_id.funding_source": "categorical",
"_id.department_name": "categorical",
"_id.replacement_body_style": "categorical",
"_id.replacement_make": "categorical",
"_id.replacement_model": "categorical",
"_id.procurement_plan": "categorical"
}
}
trained_model = ai.run_experiment(experiment_design, verbose = False, max_time_mins = 5, max_eval_time_mins = 0.04, config_dict = None, warm_start = False, export_pipeline = True, scoring = None)
```
[Click here](https://dxc-technology.github.io/DXC-Industrialized-AI-Starter/experiment/) for details about running AI experiments.
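The returned `trained_model` is the fitted scikit-learn pipeline selected by TPOT. The publish step below pickles it for you, but if you also want a local copy, a minimal sketch:
```python
import pickle

with open("trained_model.pkl", "wb") as f:
    pickle.dump(trained_model, f) #keep a local copy of the fitted pipeline
```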
### Publish Microservice
The DXC AI Starter library makes it easy to publish your models as working microservices. By default, the DXC AI Starter library uses the free tier of [Algorithmia](https://algorithmia.com/signup) to publish models as microservices. You must create an [Algorithmia](https://algorithmia.com/signup) account to use this feature. Below is an example of publishing a microservice.
```python
#trained_model is the output of run_experiment() function
microservice_design = {
"microservice_name": "<Name of your microservice>",
"microservice_description": "<Brief description about your microservice>",
"execution_environment_username": "<Algorithmia username>",
"api_key": "<your api_key>",
"api_namespace": "<your api namespace>",
"model_path":"<your model_path>"
}
#publish the micro service and display the url of the api
api_url = ai.publish_microservice(microservice_design, trained_model)
print("api url: " + api_url)
```
[Click here](https://dxc-technology.github.io/DXC-Industrialized-AI-Starter/publish_microservice/) for details about publishing a microservice.
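Once published, the microservice is a plain REST endpoint. A rough sketch of calling it with `requests` is shown below; the feature names in the payload are placeholders and must match the columns your model was trained on, and the `Authorization: Simple <api_key>` header is Algorithmia's API-key scheme:
```python
import requests

payload = {"<feature_name>": "<feature_value>"} #placeholder features
response = requests.post(api_url,
                         headers = {"Authorization": "Simple " + microservice_design["api_key"],
                                    "Content-Type": "application/json"},
                         json = payload)
print(response.json())
```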
## Docs
For detailed and complete documentation, please <a href="https://dxc-technology.github.io/DXC-Industrialized-AI-Starter/" target="_blank">click here</a>
### Example notebooks
<a href="https://nbviewer.jupyter.org/github/dxc-technology/DXC-Industrialized-AI-Starter/tree/c58754247060262ac0949396e48f71861cb79d4e/Examples/" target="_blank">Here</a> are example notebooks for individual models. These sample notebooks help to understand on how to use each function, what parameters are expected for each function and what will be the output of each function in a model.
### Contributing Guide
To learn more about contributing and the contribution guidelines, please <a href="https://github.com/dxc-technology/DXC-Industrialized-AI-Starter/blob/master/CONTRIBUTING.md" target="_blank">click here</a>
### Reporting Issues
If you find any issues, feel free to report them <a href="https://github.com/dxc-technology/DXC-Industrialized-AI-Starter/issues" target="_blank">here</a> with a clear description of the issue. You can use the existing templates for creating issues.
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/README.md | README.md |
from .AI_guild.AI_guild import guild_member_should_apply_for_badge
from .AI_guild.AI_guild import apply_for_an_ai_badge
from .AI_guild.AI_guild import AI_Guild_Role
from .AI_guild.AI_guild import AI_Badge
from .read_data.read_json import flatten_json_into_dataframe
from .read_data.read_json import get_file_path_json
from .read_data.read_json import read_data_frame_from_remote_json
from .read_data.read_json import read_data_frame_from_local_json
from .read_data.read_excel import get_file_path_excel
from .read_data.read_excel import read_data_frame_from_local_excel_file
from .read_data.read_csv import get_file_path_csv
from .read_data.read_csv import read_data_frame_from_local_csv
from .read_data.read_csv import read_data_frame_from_remote_csv
from .clean_data.clean_data import clean_dataframe
from .visualization.visualization import explore_features
from .visualization.visualization import visualize_missing_data
from .visualization.visualization import plot_distributions
from .visualization.visualization import explore_complete_data
from .pipeline.pipeline import convert_dates_from_arrow_to_string
from .pipeline.pipeline import write_raw_data
from .pipeline.pipeline import access_data_from_pipeline
from .pipeline.pipeline import store_data_from_pipeline
from .run_model.run_model import get_data_from_pipeline
from .run_model.run_model import run_experiment
from .run_model.run_model import model
from .run_model.run_model import prediction
from .run_model.run_model import regression
from .run_model.run_model import classification
from .run_model.run_model import tpot_regression
from .run_model.run_model import tpot_classification
from .run_model.interpret_model import Global_Model_Explanation
from .run_model.interpret_model import Explanation_Dashboard
# from .run_model.clustering import Clustering
# from .sentiment_analysis.sentiment_analysis import texts_from_df
# from .sentiment_analysis.sentiment_analysis import get_model_learner
# from .sentiment_analysis.sentiment_analysis import get_predictor
# from .unsupervised_sentiment_analysis.kmeans_sentiment_analysis import get_text_clean
# from .unsupervised_sentiment_analysis.kmeans_sentiment_analysis import kmeans_sentiment_analyzer
# from .unsupervised_sentiment_analysis.kmeans_sentiment_analysis import kmeans_sentiment_predictor
from .publish_microservice.publish_microservice import publish_microservice
# from .deep_learning.image_classifier import create_training_data
# from .deep_learning.image_classifier import seggregate_data
# from .deep_learning.image_classifier import split_normalize_data
# from .deep_learning.image_classifier import image_classifier
from .logging.pipeline_logging import pipeline_log
from .logging.experiment_design_logging import experiment_design_log
from .logging.microservice_logging import microservice_design_log
from .datasets._base import load_data
# from .datasets._base import load_data_details
# from .datasets._base import get_data
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/__init__.py | __init__.py |
from pymongo import MongoClient #MongoDB
import pandas as pd
import json
from dxc.ai.global_variables import globals_file
from dxc.ai.logging import pipeline_logging
def convert_dates_from_arrow_to_string(df, arrow_date_fields):
for field in arrow_date_fields:
df[field] = df[field].apply(format)
return(df)
#inserting data into mongo DB
def insert_collection(data_layer, collection_name, df):
client = MongoClient(data_layer["connection_string"]) #connect to the data layer
collections = client[data_layer["database_name"]].list_collection_names()
db = client[data_layer["database_name"]][collection_name]
#insert the records, dropping the collection first if it already exists
if collection_name not in collections:
db.insert_many(df.to_dict('records'))
else:
db.drop()
db.insert_many(df.to_dict('records'))
return db
def write_raw_data(data_layer, raw_data, arrow_date_fields = []):
##make the column names lower case and remove spaces
if globals_file.clean_data_used == True:
raw_data = raw_data.clean_names()
globals_file.wrt_raw_data_used = True
globals_file.clean_data_used = False
##convert your raw data into writable data by converting Arrow dates to strings
writable_raw_data = convert_dates_from_arrow_to_string(raw_data, arrow_date_fields)
#inserting data into MongoDB collection
db = insert_collection(data_layer, data_layer["collection_name"], writable_raw_data)
return db
#Handle case-sensitive for column names in pipeline
def col_header_conv_1(pipe):
for key,value in pipe.items():
if type(value) is dict:
col_header_conv_1(value)
elif type(value) is list:
for each in value:
if type(each) is dict:
col_header_conv_1(each)
else:
if isinstance(each, str):
j = value.index(each)
value[j] = '_'.join(each.split()).lower()
else:
if isinstance(value, str):
pipe[key] = '_'.join(value.split()).lower()
return pipe
def col_header_conv(pipe):
for i in range(len(pipe)):
single_pipe = pipe[i]
new_value = col_header_conv_1(single_pipe)
pipe[i] = new_value
return pipe
def access_data_from_pipeline(db, pipe):
pipeline_logging.pipeline_log(pipe)
if globals_file.wrt_raw_data_used == True or globals_file.clean_data_used == True:
pipe = col_header_conv(pipe)
globals_file.wrt_raw_data_used = False
globals_file.clean_data_used = False
data = db.aggregate(pipeline=pipe)
data = list(data)
df = pd.json_normalize(data)
globals_file.run_experiment_warm_start = False
return df
def store_data_from_pipeline(data_layer, df):
db = insert_collection(data_layer, data_layer["collection_name"] + '_aggregate', df)
print('The data pipeline output has been stored in MongoDB in the "%s" collection under the "%s" database' %(data_layer["collection_name"] + '_aggregate',data_layer["database_name"] ))
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/pipeline/pipeline.py | pipeline.py |
# training_data =[]
# #This function separates both features & labels from training data
# def seggregate_data(training_data):
# X = []
# Y = []
# for features, lables in training_data:
# X.append(features)
# Y.append(lables)
# X = np.array(X)
# Y = np.array(Y)
# return X,Y
# #This function reads each folder and each image and converts into array
# #This function internally calls seggregate_data to separate features & labels
# def create_training_data(CATEGORIES,DATADIR, IMG_SIZE = 100):
# for category in CATEGORIES:
# class_num = CATEGORIES.index(category)
# path = os.path.join(DATADIR,category)
# for img in os.listdir(path):
# try:
# img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)
# new_array = cv2.resize(img_array,(IMG_SIZE,IMG_SIZE))
# training_data.append([new_array,class_num])
# except Exception as e:
# pass
# random.shuffle(training_data)
# features,labels = seggregate_data(training_data)
# return features,labels
# #This function splits the data into test and train
# #This function normalizes the features values in both train and test
# #This function converts label values to binary matrix
# def split_normalize_data(X, Y, category_count, TEST_SIZE = 0.20,IMG_SIZE=100 ):
# x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = TEST_SIZE)
# x_train = x_train.reshape(x_train.shape[0], IMG_SIZE, IMG_SIZE, 1)
# x_test = x_test.reshape(x_test.shape[0], IMG_SIZE, IMG_SIZE, 1)
# x_train = x_train.astype('float32')
# x_test = x_test.astype('float32')
# x_train/=255
# x_test/=255
# y_train = tf.keras.utils.to_categorical(y_train, category_count)
# y_test = tf.keras.utils.to_categorical(y_test, category_count)
# return x_train, x_test, y_train, y_test
# #This function creates a model compiling input, Hidden and output layers
# def image_classifier(category_count, IMG_SIZE = 100):
# model = tf.keras.models.Sequential()
# model.add(tf.keras.layers.Conv2D(32,(3,3),input_shape=(IMG_SIZE,IMG_SIZE,1)))
# model.add(tf.keras.layers.Dense(32,activation=tf.nn.relu))
# model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2)))
# model.add(tf.keras.layers.Conv2D(64, (3,3)))
# model.add(tf.keras.layers.Dense(64,activation=tf.nn.relu))
# model.add(tf.keras.layers.MaxPooling2D(pool_size=(2,2)))
# model.add(tf.keras.layers.Flatten())
# model.add(tf.keras.layers.Dropout(0.2))
# model.add(tf.keras.layers.Dense(category_count,activation=tf.nn.softmax))
# model.compile(optimizer='adam',
# loss='categorical_crossentropy',
# metrics=['accuracy'])
# return model
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/deep_learning/image_classifier.py | image_classifier.py |
import Algorithmia
from Algorithmia.errors import AlgorithmException
import shutil #serializing models
import urllib.parse #input data
from git import Git, Repo, remote
import os
import pickle
from IPython.display import YouTubeVideo
from IPython.core.magic import register_line_cell_magic
import urllib.request, json
from dxc.ai.global_variables import globals_file
from dxc.ai.logging import microservice_logging
def publish_microservice(microservice_design, trained_model, verbose = False):
#Capture microservice_design in log
microservice_logging.microservice_design_log(microservice_design)
# create a connection to algorithmia
client=Algorithmia.client(microservice_design["api_key"])
api = client.algo(microservice_design["execution_environment_username"] + "/" + microservice_design["microservice_name"])
##Defining the environment for Algorithmia
try:
if microservice_design["environment"].lower() == 'default':
run_environment = "python38"
else:
run_environment = microservice_design["environment"]
except:
run_environment = "python38"
# create the algorithm if it doesn't exist
try:
api.create(
details = {
"summary": microservice_design["microservice_description"],
"label": microservice_design["microservice_name"],
"tagline": microservice_design["microservice_description"]
},
settings = {
"source_visibility": "closed",
"package_set": run_environment,
"license": "apl",
"network_access": "full",
"pipeline_enabled": True
}
)
except Exception as error:
print(error)
# create data collection if it doesn't exist
if not client.dir(microservice_design["model_path"]).exists():
client.dir(microservice_design["model_path"]).create()
# define a local work directory
local_dir = microservice_design["microservice_name"]
# delete local directory if it already exists
if os.path.exists(local_dir):
shutil.rmtree(local_dir)
# create local work directory
os.makedirs(local_dir)
# serialize the model locally
local_model = "{}/{}".format(local_dir, "mdl")
# open a file in a specified location
file = open(local_model, 'wb')
# dump information to that file
pickle.dump(trained_model, file)
# close the file
file.close()
# upload our model file to our data collection
api_model = "{}/{}".format(microservice_design["model_path"], microservice_design["microservice_name"])
client.file(api_model).putFile(local_model)
if globals_file.run_experiment_encoder_used:
encode_model = 'encode_file.pkl'
encode_output = open(encode_model, 'wb')
pickle.dump(globals_file.run_experiment_encoder, encode_output)
encode_output.close()
encode_folder = microservice_design["microservice_name"] + '_encoder'
encode_path = "{}/{}".format(microservice_design["model_path"], encode_folder)
client.file(encode_path).putFile(encode_model)
if globals_file.run_experiment_target_encoder_used:
target_encode_model = 'target_encode_file.pkl'
target_encode_output = open(target_encode_model, 'wb')
pickle.dump(globals_file.run_experiment_target_encoder, target_encode_output)
target_encode_output.close()
target_encode_folder = microservice_design["microservice_name"] + '_target_encoder'
target_encode_path = "{}/{}".format(microservice_design["model_path"], target_encode_folder)
client.file(target_encode_path).putFile(target_encode_model)
# encode API key, so we can use it in the git URL
encoded_api_key = urllib.parse.quote_plus(microservice_design["api_key"])
algo_repo = "https://{}:{}@git.algorithmia.com/git/{}/{}.git".format(
microservice_design["execution_environment_username"],
encoded_api_key,
microservice_design["execution_environment_username"],
microservice_design["microservice_name"]
)
class Progress(remote.RemoteProgress):
if verbose == False:
def line_dropped(self, line):
pass
def update(self, *args):
pass
else:
def line_dropped(self, line):
print(line)
def update(self, *args):
print(self._cur_line)
p = Progress()
try:
Repo.clone_from(algo_repo, "{}/{}".format(local_dir, microservice_design["microservice_name"]), progress=p)
cloned_repo = Repo("{}/{}".format(local_dir, microservice_design["microservice_name"]))
except Exception as error:
print("here")
print(error)
api_script_path = "{}/{}/src/{}.py".format(local_dir, microservice_design["microservice_name"], microservice_design["microservice_name"])
dependency_file_path = "{}/{}/{}".format(local_dir, microservice_design["microservice_name"], "requirements.txt")
# defines the source for the microservice
results = "{'results':prediction}"
file_path = "'" + api_model + "'"
if globals_file.run_experiment_encoder_used:
encodefile_path = "'" + encode_path + "'"
if globals_file.run_experiment_target_encoder_used:
target_encodefile_path = "'" + target_encode_path + "'"
##Don't change the structure of below docstring
##this is the source code needed for the microservice
src_code_content = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tprediction = trained_model.predict(pd.DataFrame(input,index = [0]))
\tprediction = json.dumps(prediction, default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
## source code for customized model
src_code_generalized = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tprediction = trained_model.predict(pd.DataFrame(input, index = [0]))
\tprediction = json.dumps(prediction, default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
## source code for generalized tpot model
src_code_generalized_encode = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
import feature_engine
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def load_encode():
# Get file by name
# Open file and load encoder
\tencodefile_path = {encodefile_path}
\tencode_path = client.file(encodefile_path).getFile().name
# Open file and load encoder
\twith open(encode_path, 'rb') as f:
\t\tencoder = pickle.load(f)
\t\treturn encoder
encode = load_encode()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tinput = pd.DataFrame([input])
\ttry:
\t\tinput = encode.transform(input)
\texcept:
\t\tpass
\tprediction = trained_model.predict(input)
\tprediction = json.dumps(prediction[0], default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
## source code for generalized tpot model
src_code_generalized_target_encode = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
import feature_engine
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def load_target_encode():
# Get file by name
# Open file and load target encoder
\ttarget_encodefile_path = {target_encodefile_path}
\ttarget_encode_path = client.file(target_encodefile_path).getFile().name
# Open file and load target encoder
\twith open(target_encode_path, 'rb') as f:
\t\ttarget_encoder = pickle.load(f)
\t\treturn target_encoder
target_encode = load_target_encode()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tinput = pd.DataFrame([input])
\ttry:
\t\tinput = encode.transform(input)
\texcept:
\t\tpass
\tprediction = trained_model.predict(input)
\ttry:
\t\tprediction = target_encode.inverse_transform(prediction)
\t\tprediction = prediction[0]
\texcept:
\t\tprediction = json.dumps(prediction[0], default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
## source code for generalized tpot model
src_code_generalized_both_encode = """import Algorithmia
from Algorithmia import ADK
import pandas as pd
import pickle
import json
import numpy as np
import feature_engine
# create an Algorithmia client
client = Algorithmia.client()
def load_model():
# Get file by name
# Open file and load model
\tfile_path = {file_path}
\tmodel_path = client.file(file_path).getFile().name
# Open file and load model
\twith open(model_path, 'rb') as f:
\t\tmodel = pickle.load(f)
\t\treturn model
trained_model = load_model()
def load_encode():
# Get file by name
# Open file and load encoder
\tencodefile_path = {encodefile_path}
\tencode_path = client.file(encodefile_path).getFile().name
# Open file and load encoder
\twith open(encode_path, 'rb') as f:
\t\tencoder = pickle.load(f)
\t\treturn encoder
encode = load_encode()
def load_target_encode():
# Get file by name
# Open file and load target encoder
\ttarget_encodefile_path = {target_encodefile_path}
\ttarget_encode_path = client.file(target_encodefile_path).getFile().name
# Open file and load target encoder
\twith open(target_encode_path, 'rb') as f:
\t\ttarget_encoder = pickle.load(f)
\t\treturn target_encoder
target_encode = load_target_encode()
def default(obj):
if type(obj).__module__ == np.__name__:
if isinstance(obj, np.ndarray):
return obj.tolist()
else:
return obj.item()
raise TypeError('Unknown type:', type(obj))
def apply(input):
\tinput = pd.DataFrame([input])
\ttry:
\t\tinput = encode.transform(input)
\texcept:
\t\tpass
\tprediction = trained_model.predict(input)
\ttry:
\t\tprediction = target_encode.inverse_transform(prediction)
\t\tprediction = prediction[0]
\texcept:
\t\tprediction = json.dumps(prediction[0], default=default)
\treturn {results}
algorithm = ADK(apply)
algorithm.init("Algorithmia")"""
if globals_file.run_experiment_used:
src_code_content = src_code_generalized
if globals_file.run_experiment_encoder_used and not globals_file.run_experiment_target_encoder_used:
src_code_content = src_code_generalized_encode
if globals_file.run_experiment_target_encoder_used and not globals_file.run_experiment_encoder_used:
src_code_content = src_code_generalized_target_encode
if globals_file.run_experiment_encoder_used and globals_file.run_experiment_target_encoder_used:
src_code_content = src_code_generalized_both_encode
splitted=src_code_content.split('\n')
##writes the source into the local, cloned GitHub repository
with open(api_script_path, "w") as f:
for line in splitted:
if line.strip()=="file_path = {file_path}":
line="\tfile_path = {}".format(file_path)
if line.strip()=="encodefile_path = {encodefile_path}":
line="\tencodefile_path = {}".format(encodefile_path)
if line.strip()=="target_encodefile_path = {target_encodefile_path}":
line="\ttarget_encodefile_path = {}".format(target_encodefile_path)
if line.strip()=="return {results}":
line="\treturn {}".format(results)
f.write(line + '\n')
##Don't change the structure of below docstring
##this is the requirements needed for microservice
requirements_file_content="""algorithmia
pandas
numpy
feature-engine"""
post_split=requirements_file_content.split('\n')
#writes the requirements file into the local, cloned GitHub repository.
with open(dependency_file_path, "w") as f:
for line in post_split:
line = line.lstrip()
f.write(line + '\n')
# Publish the microservice
files = ["src/{}.py".format(microservice_design["microservice_name"]), "requirements.txt"]
cloned_repo.index.add(files)
cloned_repo.index.commit("Add algorithm files")
origin = cloned_repo.remote(name='origin')
p = Progress()
origin.push(progress=p)
# publish/deploy our algorithm
#client.algo(microservice_design["api_namespace"]).publish()
# api.publish(
# settings = {
# "algorithm_callability": "private"
# },
# version_info = {
# "release_notes": "Publishing Microservice",
# "version_type": "revision"
# },
# details = {
# "label": microservice_design["microservice_name"]
# }
# )
api.publish(
details = {
"label": microservice_design["microservice_name"]
}
)
# code generates the api endpoint for the newly published microservice
latest_version = client.algo(microservice_design["api_namespace"]).info().version_info.semantic_version
api_url = "https://api.algorithmia.com/v1/algo/{}/{}".format(microservice_design["api_namespace"], latest_version)
return api_url
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/publish_microservice/publish_microservice.py | publish_microservice.py |
import json
import requests
from enum import Enum
class AI_Guild_Role(Enum):
PROJECT_MANAGER = 1,
DATA_SCIENTIST = 2,
DATA_ENGINEER = 3,
ALL = 4
class AI_Badge(Enum):
CREATE_DATA_STORIES = 1
RUN_AGILE_TRANSFORMATION = 2
BUILD_DATA_PIPELINES = 3
RUN_AI_EXPERIMENT = 4
BUILD_UTILITY_AI_SERVICES = 5
PERFORM_AI_FORENSICS = 6
TEST = 7
def guild_member_should_apply_for_badge(guild_member_roles, ai_badge):
guild_role_badges = {
AI_Guild_Role.PROJECT_MANAGER : [AI_Badge.CREATE_DATA_STORIES, AI_Badge.RUN_AGILE_TRANSFORMATION],
AI_Guild_Role.DATA_SCIENTIST : [AI_Badge.RUN_AI_EXPERIMENT, AI_Badge.PERFORM_AI_FORENSICS],
AI_Guild_Role.DATA_ENGINEER : [AI_Badge.BUILD_DATA_PIPELINES, AI_Badge.BUILD_UTILITY_AI_SERVICES],
AI_Guild_Role.ALL: [AI_Badge.CREATE_DATA_STORIES, AI_Badge.RUN_AGILE_TRANSFORMATION, AI_Badge.BUILD_DATA_PIPELINES, AI_Badge.RUN_AI_EXPERIMENT, AI_Badge.BUILD_UTILITY_AI_SERVICES, AI_Badge.PERFORM_AI_FORENSICS]
}
ai_badge_id = {
AI_Badge.CREATE_DATA_STORIES: "dd05bbdf-ad5b-469d-ab2c-4dd218fd68fe",
AI_Badge.RUN_AGILE_TRANSFORMATION: "8ec48861-c355-4e44-b98f-0a80cf1440a8",
AI_Badge.BUILD_DATA_PIPELINES: "2cffc101-8fc3-4680-a8cd-29ec58483832",
AI_Badge.RUN_AI_EXPERIMENT: "ddeb2020-1db5-48c6-8b2c-ea2e50f050d7",
AI_Badge.BUILD_UTILITY_AI_SERVICES: "6e8b661f-31bc-46f7-89e4-194e7e6ebb21",
AI_Badge.PERFORM_AI_FORENSICS: "ec96e016-9e33-4b4e-a6bd-43491e811179",
AI_Badge.TEST: "b828e318-8501-434e-9f55-ccdb7000ee09"
}
#start by assuming we should not apply for the badge
apply_for_badge = False
for role in guild_member_roles:
#apply for the badge if a matching role is found
if ai_badge in guild_role_badges[role]: apply_for_badge = True
return apply_for_badge
def apply_for_an_ai_badge(ai_guild_profile, ai_badge):
guild_role_badges = {
AI_Guild_Role.PROJECT_MANAGER : [AI_Badge.CREATE_DATA_STORIES, AI_Badge.RUN_AGILE_TRANSFORMATION],
AI_Guild_Role.DATA_SCIENTIST : [AI_Badge.RUN_AI_EXPERIMENT, AI_Badge.PERFORM_AI_FORENSICS],
AI_Guild_Role.DATA_ENGINEER : [AI_Badge.BUILD_DATA_PIPELINES, AI_Badge.BUILD_UTILITY_AI_SERVICES],
AI_Guild_Role.ALL: [AI_Badge.CREATE_DATA_STORIES, AI_Badge.RUN_AGILE_TRANSFORMATION, AI_Badge.BUILD_DATA_PIPELINES, AI_Badge.RUN_AI_EXPERIMENT, AI_Badge.BUILD_UTILITY_AI_SERVICES, AI_Badge.PERFORM_AI_FORENSICS]
}
ai_badge_id = {
AI_Badge.CREATE_DATA_STORIES: "dd05bbdf-ad5b-469d-ab2c-4dd218fd68fe",
AI_Badge.RUN_AGILE_TRANSFORMATION: "8ec48861-c355-4e44-b98f-0a80cf1440a8",
AI_Badge.BUILD_DATA_PIPELINES: "2cffc101-8fc3-4680-a8cd-29ec58483832",
AI_Badge.RUN_AI_EXPERIMENT: "ddeb2020-1db5-48c6-8b2c-ea2e50f050d7",
AI_Badge.BUILD_UTILITY_AI_SERVICES: "6e8b661f-31bc-46f7-89e4-194e7e6ebb21",
AI_Badge.PERFORM_AI_FORENSICS: "ec96e016-9e33-4b4e-a6bd-43491e811179",
AI_Badge.TEST: "b828e318-8501-434e-9f55-ccdb7000ee09"
}
# Construct apiEndponit string
apiPath = f'badges/{ai_badge_id[ai_badge]}/assertions'
apiEndpoint = f'{ai_guild_profile["badge_platform_apiHost"]}{ai_guild_profile["badge_platform_apiBasePath"]}{apiPath}'
headers = {
'Content-Type': 'application/json',
'X-Api-Key': ai_guild_profile["badge_platform_apiKey"]
}
#for each member in the guild:
num_guild_members = len(ai_guild_profile['guild_members'])
for i in range(1,num_guild_members + 1):
#apply for the badge if applicable
if guild_member_should_apply_for_badge(ai_guild_profile['guild_members'][i]['roles'], ai_badge):
payload = {
'email': ai_guild_profile['guild_members'][i]['badge_applicant_email'],
'evidence': ai_guild_profile["badge_evidence"]
}
response = requests.post(
apiEndpoint,
headers=headers,
json=payload
)
print(response)
print(json.dumps(response.json(), indent=4))
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/AI_guild/AI_guild.py | AI_guild.py |
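# Editor's sketch (hedged; not part of the original AI_guild.py): the profile
# shape that apply_for_an_ai_badge() expects, inferred from the code above.
# All endpoint and key values below are placeholders, not real credentials.
def _example_badge_application():
    ai_guild_profile = {
        "badge_platform_apiHost": "https://<badge-platform-host>",
        "badge_platform_apiBasePath": "/api/v1/",
        "badge_platform_apiKey": "<api key>",
        "badge_evidence": "<link to supporting work>",
        "guild_members": {
            1: {
                "roles": [AI_Guild_Role.DATA_SCIENTIST],
                "badge_applicant_email": "member@example.com",
            },
        },
    }
    apply_for_an_ai_badge(ai_guild_profile, AI_Badge.RUN_AI_EXPERIMENT)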
import pandas as pd
import janitor #data cleaning
from ftfy import fix_text #data cleaning
import nltk #data cleaning
nltk.download('punkt') #data cleaning
import scrubadub #data cleaning
import arrow #normalizing dates
import numpy as np
from sklearn.base import TransformerMixin
from dxc.ai.global_variables import globals_file
class DataFrameImputer(TransformerMixin):
def __init__(self):
"""Impute missing values.
Columns of dtype object are imputed with the most frequent value
in column.
Columns of other types are imputed with mean of column.
"""
def fit(self, X, y=None):
self.fill = pd.Series([X[c].value_counts().index[0]
if X[c].dtype == np.dtype('O') else X[c].mean() for c in X],
index=X.columns)
return self
def transform(self, X, y=None):
return X.fillna(self.fill)
#CLEANING FILE
def clean_dataframe(df, impute = False, text_fields = [], date_fields = [], numeric_fields = [], categorical_fields = []):
clean_df = (
df
#make the column names lower case and remove spaces
.clean_names()
#remove empty columns
.remove_empty()
#remove empty rows and columns
.dropna(how='all')
)
#remove harmful characters. remove personal identifiers. make lowercase
for field in text_fields:
field = '_'.join(field.split()).lower()
clean_df[field] = clean_df[field].fillna(' ').apply(fix_text)
clean_df[field] = clean_df[field].apply(scrubadub.clean, replace_with='identifier')
clean_df[field] = clean_df[field].str.lower()
#impute missing values
if impute:
clean_df = DataFrameImputer().fit_transform(clean_df)
#standardize the format of all date fields
for field in date_fields:
field = '_'.join(field.split()).lower()
clean_df[field] = clean_df[field].apply(arrow.get)
#make sure all numeric fields have the proper data type
for field in numeric_fields:
field = '_'.join(field.split()).lower()
clean_df[field] = pd.to_numeric(clean_df[field])
#make sure all categorical variables have the proper data type
for field in categorical_fields:
field = '_'.join(field.split()).lower()
clean_df[field] = clean_df[field].astype('category')
clean_df=clean_df.clean_names()
globals_file.clean_data_used = True
return(clean_df)
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/clean_data/clean_data.py | clean_data.py |
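# Editor's sketch (hedged; not part of the original clean_data.py): calling
# clean_dataframe with explicit field types. Column names are illustrative only.
def _example_clean():
    demo = pd.DataFrame({
        "Unit Cost": ["10", "12", None],
        "Department Name": ["fire", "police", "fire"],
        "Comments": ["ok", None, "needs review"],
    })
    return clean_dataframe(
        demo,
        impute = True,
        text_fields = ["Comments"],
        numeric_fields = ["Unit Cost"],
        categorical_fields = ["Department Name"],
    )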
from yellowbrick.features import Rank2D #exploring raw data
import matplotlib.pyplot as plt
import missingno as msno #gauge dataset completeness
import seaborn as sns #data exploration, distribution plotting
import pandas as pd
from datacleaner import autoclean
import math
from pandas.api.types import is_numeric_dtype
from pandas_profiling import ProfileReport
#VISUALIZATION
#display the correlations in pairwise comparisons of all features
def explore_features(df):
df_copy = df.copy()
#for some reason, the visualize doesn't accept categorical
#variables. those have to be converted to strings
for (col,data) in df_copy.iteritems():
if df_copy[col].dtype.name == "category":
df_copy[col] = df_copy[col].astype(str)
numeric_df = autoclean(df_copy)
visualizer = Rank2D(algorithm="pearson")
visualizer.fit_transform(numeric_df)
visualizer.poof()
#display a visual representation of missing fields in the given data
def visualize_missing_data(df):
msno.matrix(df, figsize=(15,8))
def explore_complete_data(df, title='Complete Data Report'):
profile = ProfileReport(df, title=title, html={'style':{'full_width':False}})
return profile
#plot the distribution of values of each field in the given data
def plot_distributions(df):
#set plot style
sns.set(style="darkgrid")
features = len(df.columns)
#determine the number of columns in the plot grid and the width and height of each plot
grid_cols = 3
plot_width = 5
plot_height = 3
#determine the width of the plot grid and number of rows
grid_width = plot_width * grid_cols
num_rows = math.ceil(features/grid_cols)
#determine the width of the plot grid
grid_height = plot_height * num_rows
#lay out the plot grid
fig1 = plt.figure(constrained_layout=True, figsize = (grid_width,grid_height))
gs = fig1.add_gridspec(ncols = grid_cols, nrows = num_rows)
#step through the dataframe and add plots for each feature
current_column = 0
current_row = 0
for col in df.columns:
#set up a plot
f1_ax1 = fig1.add_subplot(gs[current_row, current_column])
f1_ax1.set_title(col)
#create a plot for numeric values
if is_numeric_dtype(df[col]):
sns.histplot(df[col], ax = f1_ax1, kde = True).set_xlabel('')
#create a plot for categorical values
if df[col].dtype.name == "category":
sns.countplot(df[col], ax = f1_ax1, order = df[col].value_counts().index).set_xlabel('')
#move to the next column
current_column +=1
#determine if it is time to start a new row
if current_column == grid_cols:
current_column = 0
current_row +=1
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/visualization/visualization.py | visualization.py |
# from statsmodels.tsa.stattools import adfuller
# from statsmodels.tsa.seasonal import seasonal_decompose
# from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
# from sklearn.metrics import mean_squared_error
# from statsmodels.tsa.ar_model import AR
# from statsmodels.tsa.arima_model import ARMA, ARIMA
# from statsmodels.tsa.holtwinters import SimpleExpSmoothing, Holt, ExponentialSmoothing
# import pyaf.ForecastEngine as autof
# from pmdarima.arima import auto_arima
# from pmdarima.model_selection import train_test_split
# import warnings
# warnings.filterwarnings('ignore', 'statsmodels.tsa.ar_model.AR', FutureWarning)
# #Dickey-Fuller Test for stationary check
# def adf_test(timeseries):
# #Perform Dickey-Fuller test:
# print('Results of Augmented Dickey-Fuller(ADF) Statistical Test:')
# dftest = adfuller(timeseries, autolag='AIC')
# dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
# for key,value in dftest[4].items():
# dfoutput['Critical Value (%s)'%key] = value
# print(dfoutput)
# #autoRegressiveModel
# def autoRegressiveModel(df, no_predictions = 7, debug = False , visualize = False):
# data = df.values
# # Splitting data into train and test set.
# train, test = data[1:len(data)-no_predictions], data[len(data)-no_predictions:]
# # train autoregression
# model = AR(train)
# model_fit = model.fit()
# if(debug):
# print(model_fit.summary())
# # make predictions
# predictions = model_fit.predict(start=len(train), end=len(train)+len(test)-1+no_predictions, dynamic=False)
# if(visualize):
# plt.plot(test , color = "blue" , label = "testing data")
# plt.plot(predictions, color='red' , label = "prediction")
# plt.legend(loc='best')
# plt.show()
# error = np.sqrt(mean_squared_error(test, predictions[:no_predictions]))
# return (predictions[-no_predictions:] , error, model_fit)
# def simpleExponentialSmoothing(df, no_predictions=7, debug = False , visualize = False):
# train_data, test_data = df[1:int(len(df)-no_predictions)], df[int(len(df)-no_predictions):]
# fit1 = SimpleExpSmoothing(np.asarray(train_data)).fit(smoothing_level=0.85 , optimized=False)
# if(debug):
# print(fit1.summary())
# predictions = fit1.forecast(no_predictions*2)
# if(visualize):
# plt.plot(list(test_data), color = 'blue', label='testing data')
# plt.plot(list(predictions), color = 'red',label='prediction')
# plt.legend(loc='upper left', fontsize=8)
# plt.show()
# error = np.sqrt(mean_squared_error(test_data, predictions[:no_predictions]))
# return (predictions[-no_predictions:] , error, fit1)
# def doubleSmoothing(df, no_predictions=7, debug = False , visualize = False):
# train_data, test_data = df[1:int(len(df)-no_predictions)], df[int(len(df)-no_predictions):]
# model = ExponentialSmoothing(np.asarray(train_data), trend='add', seasonal=None)
# fit1 = model.fit()
# if(debug):
# print(fit1.summary())
# predictions = fit1.forecast(no_predictions*2)
# if(visualize):
# plt.plot(list(test_data), color = 'blue', label='testing data')
# plt.plot(list(predictions), color = 'red',label='prediction')
# plt.legend(loc='upper left', fontsize=8)
# plt.show()
# error = np.sqrt(mean_squared_error(test_data, predictions[:no_predictions]))
# return (predictions[-no_predictions:] , error, fit1)
# def autoRegressiveMovingAverageModel(df, order=(1,0), no_predictions=7, debug=False , visualize=False):
# data = df.values
# train, test = data[1:len(data)-no_predictions], data[len(data)-no_predictions:]
# model = ARMA(train, order = order)
# model_fit = model.fit()
# if(debug):
# print(model_fit.summary())
# predictions = model_fit.predict(start=len(train), end=len(train)+len(test)-1+no_predictions, dynamic=False)
# if(visualize):
# plt.plot(test , color = "blue" , label = "testing data")
# plt.plot(predictions, color='red' , label = "prediction")
# plt.legend(loc='best')
# plt.show()
# error = np.sqrt(mean_squared_error(test, predictions[:no_predictions]))
# return (predictions[-no_predictions:] , error, model_fit)
# def getBestForcastingModel(df, no_predictions=7, debug=False, visualize = False):
# modelResults = {}
# modelResults["autoRegressiveModel"] = autoRegressiveModel(df, no_predictions)[1]
# modelResults["simpleExponentialSmoothing"] = simpleExponentialSmoothing(df, no_predictions)[1]
# modelResults["doubleSmoothing"] = doubleSmoothing(df, no_predictions)[1]
# modelResults["autoRegressiveMovingAverageModel"] = autoRegressiveMovingAverageModel(df, order=(1,0), no_predictions=no_predictions)[1]
# bestModel = min(modelResults.items(), key=operator.itemgetter(1))[0]
# print(bestModel)
# print(adf_test(df))
# if bestModel == 'autoRegressiveModel':
# results, error, model = autoRegressiveModel(df, no_predictions, debug, visualize)
# print('RMSE:', error)
# return model
# elif bestModel == 'simpleExponentialSmoothing':
# results, error, model = simpleExponentialSmoothing(df, no_predictions, debug, visualize)
# print('RMSE:', error)
# return model
# elif bestModel == 'doubleSmoothing':
# results, error, model = doubleSmoothing(df, no_predictions, debug, visualize)
# print('RMSE:', error)
# return model
# elif bestModel == 'autoRegressiveMovingAverageModel':
# results, error, model = autoRegressiveMovingAverageModel(df, order=(1,0), no_predictions=no_predictions, debug=debug, visualize=visualize)
# print('RMSE:', error)
# return model
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/run_model/TimeSeriesModels.py | TimeSeriesModels.py |
import pandas as pd
from tpot import TPOTRegressor
from tpot import TPOTClassifier
from sklearn.model_selection import train_test_split
from feature_engine import encoding as ce
from .interpret_model import Global_Model_Explanation
from .interpret_model import Explanation_Dashboard
from sklearn import preprocessing
import warnings
import os
import numpy as np
from dxc.ai.global_variables import globals_file
from sklearn.metrics import SCORERS
###encode and return the categorical data
def categorical_encoding(data,target):
data_1 = data.dropna()
objFeatures = data_1.select_dtypes(include="object").columns
objlen = len(list(objFeatures))
if objlen == 0:
return data_1.drop([target], axis = 1), data_1[target]
else:
if data_1[target].dtype == 'object':
le = preprocessing.LabelEncoder()
try:
data_1[target] = le.fit_transform(data_1[target].astype(str))
except:
data_1[target] = data_1[target].astype('category')
data_1[target] = le.fit_transform(data_1[target].astype(str))
globals_file.run_experiment_target_encoder = le
globals_file.run_experiment_target_encoder_used = True
objFeatures_1 = data_1.select_dtypes(include="object").columns
objlen_1 = len(list(objFeatures_1))
if objlen_1 == 0:
return data_1.drop([target], axis = 1), data_1[target]
else:
data_2 = data_1.drop([target], axis = 1)
encoder = ce.OrdinalEncoder(encoding_method='ordered')
encoder.fit(data_2, data_1[target])
data_3 = encoder.transform(data_2)
globals_file.run_experiment_encoder = encoder
globals_file.run_experiment_encoder_used = True
return data_3, data_1[target]
##Define Tpot regressor
def regressor(verbosity, max_time_mins , max_eval_time_mins, config_dict, warm_start, scoring):
if verbosity == True:
model_def = TPOTRegressor(verbosity=2, max_time_mins=max_time_mins, max_eval_time_mins= max_eval_time_mins, config_dict = config_dict, warm_start = warm_start, scoring= scoring, template = 'Regressor')
else:
model_def = TPOTRegressor(verbosity=0, max_time_mins=max_time_mins, max_eval_time_mins= max_eval_time_mins, config_dict = config_dict, warm_start = warm_start, scoring= scoring, template = 'Regressor')
return model_def
##Define Tpot classifier
def classifier(verbosity, max_time_mins, max_eval_time_mins, config_dict, warm_start, scoring):
if verbosity == True:
model_def = TPOTClassifier(verbosity=2, max_time_mins=max_time_mins, max_eval_time_mins= max_eval_time_mins, config_dict = config_dict, warm_start = warm_start, scoring = scoring, template = 'Classifier')
else:
model_def = TPOTClassifier(verbosity=0, max_time_mins=max_time_mins, max_eval_time_mins= max_eval_time_mins, config_dict = config_dict, warm_start = warm_start, scoring = scoring, template = 'Classifier')
return model_def
###Train the model
def train_model(data, target, model_def, model_type, interpret = False, warm_start = False, export_pipeline = True):
if warm_start == False:
globals_file.run_experiment_encoder = None
globals_file.run_experiment_target_encoder = None
globals_file.run_experiment_target_encoder_used = False
globals_file.run_experiment_encoder_used = False
globals_file.run_experiment_warm_start = False
data_transformed, label_data = categorical_encoding(data,target)
try:
x_train, x_test, y_train, y_test = train_test_split(data_transformed, label_data, test_size=0.2, random_state=0, stratify = label_data)
except:
x_train, x_test, y_train, y_test = train_test_split(data_transformed, label_data, test_size=0.2, random_state=0)
##Save the data for first time execution with warm start.
if warm_start == True and globals_file.run_experiment_warm_start == False:
globals_file.run_experiment_encoder = None
globals_file.run_experiment_target_encoder = None
globals_file.run_experiment_target_encoder_used = False
globals_file.run_experiment_encoder_used = False
data_transformed, label_data = categorical_encoding(data,target)
try:
x_train, x_test, y_train, y_test = train_test_split(data_transformed, label_data, test_size=0.2, random_state=0, stratify = label_data)
except:
x_train, x_test, y_train, y_test = train_test_split(data_transformed, label_data, test_size=0.2, random_state=0)
globals_file.run_experiment_warm_start = True
globals_file.run_experiment_x_train = x_train
globals_file.run_experiment_x_test = x_test
globals_file.run_experiment_y_train = y_train
globals_file.run_experiment_y_test = y_test
globals_file.run_experiment_datatransformed = data_transformed
globals_file.run_experiment_labeldata = label_data
##Return the data from second time execution with warm start
if warm_start == True and globals_file.run_experiment_warm_start == True:
x_train = globals_file.run_experiment_x_train
x_test = globals_file.run_experiment_x_test
y_train = globals_file.run_experiment_y_train
y_test = globals_file.run_experiment_y_test
data_transformed = globals_file.run_experiment_datatransformed
label_data = globals_file.run_experiment_labeldata
model = model_def.fit(x_train, y_train)
globals_file.run_experiment_model = model
best_pipeline = model.fitted_pipeline_
if model_type == 'TPOTRegressor':
print()
# r2 score
score_r2 = SCORERS['r2'](best_pipeline, x_test, y_test)
print("r2 Score:", score_r2)
print()
# neg_mean_squared_error
score_nmse = SCORERS['neg_mean_squared_error'](best_pipeline, x_test, y_test)
print('Negative mean square error:', score_nmse)
print()
# neg_root_mean_squared_error
score_nrmse = SCORERS['neg_root_mean_squared_error'](best_pipeline, x_test, y_test)
print('Negative root mean square error:', score_nrmse)
print()
# explained variance
score_var = SCORERS['explained_variance'](best_pipeline, x_test, y_test)
print('explained_variance:', score_var)
print()
# negative mean absolute error
score_nmae = SCORERS['neg_mean_absolute_error'](best_pipeline, x_test, y_test)
print('Negative_mean_absolute_error:', score_nmae)
print()
# Negative median absolute error
score_nmdae = SCORERS['neg_median_absolute_error'](best_pipeline, x_test, y_test)
print('Negative_median_absolute_error:', score_nmdae)
print()
if model_type == 'TPOTClassifier':
print()
# Accuracy
score_acc = SCORERS['accuracy'](best_pipeline, x_test, y_test)
print("Accuracy:", score_acc)
print()
#ROC_AUC_OVR
try:
score_roc_ovr = SCORERS['roc_auc_ovr'](best_pipeline, x_test, y_test)
print("ROC_AUC_OVR:", score_roc_ovr)
print()
except:
pass
#ROC_AUC_OVO
try:
score_roc_ovo = SCORERS['roc_auc_ovo'](best_pipeline, x_test, y_test)
print("ROC_AUC_OVO:", score_roc_ovo)
print()
except:
pass
# Recall
score_rec_macro = SCORERS['recall_macro'](best_pipeline, x_test, y_test)
print("Recall:", score_rec_macro)
print()
# Precision
score_pre_macro = SCORERS['precision_macro'](best_pipeline, x_test, y_test)
print("Precision:", score_pre_macro)
print()
#F1
score_f1_macro = SCORERS['f1_macro'](best_pipeline, x_test, y_test)
print("F1 Score:", score_f1_macro)
print()
##Save the pipeline and data
if export_pipeline == True:
currentDirectory = os.getcwd()
currentDirectory = os.path.join(currentDirectory, 'data_file.csv')
data_file_combine = data_transformed
data_file_combine['target'] = label_data
data_file_combine.to_csv(currentDirectory, index = False, header=True, encoding='utf-8')
model.export('best_pipeline.py', currentDirectory)
##Generate interpret interactive charts
if interpret == True:
global_explanation = Global_Model_Explanation(model.fitted_pipeline_,x_train,x_test,feature_names = None,classes = None, explantion_data = None)
Explanation_Dashboard(global_explanation, model.fitted_pipeline_, x_train, x_test, explantion_data = None)
return model.fitted_pipeline_
| AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/run_model/model_pipeline.py | model_pipeline.py |
import arrow #normalizing dates
import numpy as np
from sklearn.base import TransformerMixin #impute missing data
#from auto_ml import Predictor #ML models
from sklearn.model_selection import train_test_split
import os
import pickle
from contextlib import redirect_stdout
import warnings
import io
from dxc.ai.global_variables import globals_file
# from .TimeSeriesModels import getBestForcastingModel
from dxc.ai.logging import experiment_design_logging
from .model_pipeline import regressor
from .model_pipeline import train_model
from .model_pipeline import classifier
from pymongo import MongoClient #MongoDB
import pandas as pd
#Getting data from MongoDB
def get_data_from_pipeline(data_layer):
#connect to MongoDB
client = MongoClient(data_layer["connection_string"])
db = client[data_layer["database_name"]][data_layer["collection_name"] + '_aggregate']
#getting aggregated pipeline data
df = pd.json_normalize(list(db.find({},{'_id':0})))
return df
# define the general class of models
class model:
__model = []
def build(self): raise NotImplementedError()
def train_and_score(self, data): raise NotImplementedError()
def interpret(self): raise NotImplementedError()
def python_object(): raise NotImplementedError()
# @staticmethod
# def meta_data_key(meta_data, value):
# key_list = list(meta_data.keys())
# val_list = list(meta_data.values())
# return key_list[val_list.index(value)]
#define the model lifecycle
# define a prediction model
class prediction(model):
@property
def estimator(self):
raise NotImplementedError()
def build(self, target_label, verbose, max_time_mins, max_eval_time_mins, config_dict, warm_start, scoring):
if self.estimator == 'TPOTRegressor':
if globals_file.run_experiment_warm_start == False or warm_start == False:
self.__model = regressor(verbose, max_time_mins, max_eval_time_mins, config_dict, warm_start, scoring)
else:
self.__model = globals_file.run_experiment_model
elif self.estimator == 'TPOTClassifier':
if globals_file.run_experiment_warm_start == False or warm_start == False:
self.__model = classifier(verbose, max_time_mins, max_eval_time_mins, config_dict, warm_start, scoring)
else:
self.__model = globals_file.run_experiment_model
else:
pass
#self.__model = Predictor(type_of_estimator=self.estimator, column_descriptions=meta_data)
self.__label = target_label
def train_and_score(self, data, labels, verbose, interpret, warm_start, export_pipeline):
##Train and score
if self.estimator == 'TPOTRegressor' or self.estimator == 'TPOTClassifier':
self.__model = train_model(data, self.__label, self.__model, self.estimator, interpret, warm_start, export_pipeline)
else:
pass
# create training and test data
#training_data, test_data = train_test_split(data, test_size=0.2)
# train the model
#if verbose == False:
# warnings.filterwarnings('ignore')
# text_trap = io.StringIO()
# with redirect_stdout(text_trap):
# self.__model.train(training_data, verbose=False, ml_for_analytics= False)
#else:
# warnings.filterwarnings('ignore')
# self.__model.train(training_data, verbose=True, ml_for_analytics=False)
# score the model
#if verbose == False:
# self.__model.score(test_data, test_data[self.__label], verbose=0)
#else:
# self.__model.score(test_data, test_data[self.__label], verbose=1)
def interpret(self):
pass
def python_object(self):
return self.__model
# define a regressor model
class regression(prediction):
@property
def estimator(self):
return("regressor")
# define a classification model
class classification(prediction):
@property
def estimator(self):
return("classifier")
# define a tpot classification model
class tpot_classification(prediction):
@property
def estimator(self):
return("TPOTClassifier")
# define a Tpot regressor model
class tpot_regression(prediction):
@property
def estimator(self):
return("TPOTRegressor")
def run_experiment(design, verbose = False, interpret = False, max_time_mins = 5, max_eval_time_mins = 0.04 , config_dict = None, warm_start = False, export_pipeline = True, scoring = None):
experiment_design_logging.experiment_design_log(design)
# if design["model"] == 'timeseries':
# trained_model = getBestForcastingModel(design['labels'], no_predictions=7, debug=verbose, visualize = False)
# return trained_model
globals_file.run_experiment_used = True
design["model"].build(design["labels"].name, verbose, max_time_mins, max_eval_time_mins, config_dict,warm_start, scoring)
design["model"].train_and_score(design["data"], design["labels"], verbose, interpret, warm_start, export_pipeline)
design["model"].interpret()
return design["model"].python_object() | AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/run_model/run_model.py | run_model.py |
# def remove_stop_words(data):
# words = word_tokenize(str(data))
# new_text = ""
# for w in words:
# if w not in stopwords.words('english'):
# new_text = new_text + " " + w
# return new_text
# def remove_emails(x):
# return re.sub(r'([a-z0-9+._-]+@[a-z0-9+._-]+\.[a-z0-9+_-]+)',"", x)
# def remove_urls(x):
# return re.sub(r'(http|https|ftp|ssh)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?', '' , x)
# def remove_html_tags(x):
# return BeautifulSoup(x, 'lxml').get_text().strip()
# def remove_rt(x):
# return re.sub(r'\brt\b', '', x).strip()
# def remove_accented_chars(x):
# x = unicodedata.normalize('NFKD', x).encode('ascii', 'ignore').decode('utf-8', 'ignore')
# return x
# def remove_special_chars(x):
# x = re.sub(r'[^\w ]+', "", x)
# x = ' '.join(x.split())
# return x
# def get_text_clean(x):
# x = str(x).lower().replace('\\', '').replace('_', ' ')
# x = remove_emails(x)
# x = remove_urls(x)
# x = remove_html_tags(x)
# x = remove_rt(x)
# x = remove_accented_chars(x)
# x = remove_special_chars(x)
# x = re.sub("(.)\\1{2,}", "\\1", x)
# x = remove_stop_words(x)
# return x
# def kmeans_sentiment_analyzer(df_TB):
# #tokenize the reviews from dataframe
# all_words = [nltk.word_tokenize(sent) for sent in df_TB[df_TB.columns[0]]]
# #generate vectors for tokenized words
# word2vec = Word2Vec(all_words, min_count=2)
# vocabulary = word2vec.wv.vocab
# #User will be allowed to pass parameters in further enhancement
# model = KMeans(n_clusters=2, max_iter=1000, random_state=True, n_init=50).fit(X=word2vec.wv.vectors.astype('double'))
# #Logic need to be enhanced further to generalize the cluster indication
# positive_cluster_index = 0
# words = pd.DataFrame(word2vec.wv.vocab.keys())
# words.columns = ['words']
# words['vectors'] = words.words.apply(lambda x: word2vec[f'{x}'])
# words['cluster'] = words.vectors.apply(lambda x: model.predict([np.array(x)]))
# words.cluster = words.cluster.apply(lambda x: x[0])
# words['cluster_value'] = [1 if i==positive_cluster_index else -1 for i in words.cluster]
# words['closeness_score'] = words.apply(lambda x: 1/(model.transform([x.vectors]).min()), axis=1)
# words['sentiment_coeff'] = words.closeness_score * words.cluster_value
# words[['words', 'sentiment_coeff']].to_csv('sentiment_dictionary.csv', index=False)
# print('words processed and coefficients stored in sentiment_dictionary.csv')
# def create_tfidf_dictionary(x, transformed_file, features):
# vector_coo = transformed_file[x.name].tocoo()
# vector_coo.col = features.iloc[vector_coo.col].values
# dict_from_coo = dict(zip(vector_coo.col, vector_coo.data))
# return dict_from_coo
# def replace_tfidf_words(x, transformed_file, features):
# dictionary = create_tfidf_dictionary(x, transformed_file, features)
# return list(map(lambda y:dictionary[f'{y}'], x.Reviews.split()))
# def replace_sentiment_words(word, sentiment_dict):
# try:
# out = sentiment_dict[word]
# except KeyError:
# out = 0
# return out
# def kmeans_sentiment_predictor(df_predict):
# sentiment_map = pd.read_csv('sentiment_dictionary.csv')
# sentiment_dict = dict(zip(sentiment_map.words.values, sentiment_map.sentiment_coeff.values))
# file_weighting = df_predict.copy()
# file_weighting.columns = ['Reviews', 'Sentiment']
# tfidf = TfidfVectorizer(tokenizer=lambda y: y.split(), norm=None)
# tfidf.fit(file_weighting.Reviews)
# features = pd.Series(tfidf.get_feature_names())
# transformed = tfidf.transform(file_weighting.Reviews)
# replaced_tfidf_scores = file_weighting.apply(lambda x: replace_tfidf_words(x, transformed, features), axis=1)
# replaced_closeness_scores = file_weighting.Reviews.apply(lambda x: list(map(lambda y: replace_sentiment_words(y,
# sentiment_dict), x.split())))
# new_df = pd.DataFrame(data=[replaced_closeness_scores, replaced_tfidf_scores,
# file_weighting.Reviews,file_weighting.Sentiment]).T
# new_df.columns = ['sentiment_coeff', 'tfidf_scores', 'sentence', 'sentiment']
# new_df['sentiment_rate'] = new_df.apply(lambda x: np.array(x.loc['sentiment_coeff']) @
# np.array(x.loc['tfidf_scores']), axis=1)
# new_df['prediction'] = (new_df.sentiment_rate>0).astype('int8')
# new_df['sentiment'] = [1 if i==1 else 0 for i in new_df.sentiment]
# return new_df[['sentence','prediction']] | AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/ai/unsupervised_sentiment_analysis/kmeans_sentiment_analysis.py | kmeans_sentiment_analysis.py |
from .define_model_components import define_critic_layers
from .define_model_components import define_layers
from .define_model_components import define_memory
from .define_model_components import define_policy
from .define_model_components import define_random_process
import numpy as np
import gym
from gym import spaces
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Activation, Flatten, Input, Concatenate
from tensorflow.keras.optimizers import Adam
import rl
from rl.agents.dqn import DQNAgent
from rl.agents import DDPGAgent
from rl.agents import SARSAAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
def rl_helper(env, model_name, saved_model_name="model", steps=50000, test_steps=5, visualize=False, hidden_layers=3, critic_hidden_layers=3):
if (model_name == "DDPG"):
tf.compat.v1.enable_eager_execution()
tf_keras_ddpg(env, saved_model_name, steps)
else:
tf.compat.v1.disable_eager_execution()
keras_rl(env, model_name, saved_model_name, steps, test_steps, visualize, hidden_layers, critic_hidden_layers)
def keras_rl(env, model_name, saved_model_name="model", steps=50000, test_steps=5, visualize=False, hidden_layers=3, critic_hidden_layers=3):
nb_actions = 0
if (model_name == "DQN" or model_name == "SARSA"):
nb_actions = env.action_space.n
elif (model_name == "DDPG"):
nb_actions = env.action_space.shape[0]
model_structure = define_layers(
env, nb_actions, num_of_hidden_layers=hidden_layers)
memory = define_memory()
policy = define_policy(model_name)
if (model_name == "DQN"):
model = DQNAgent(model=model_structure, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
enable_double_dqn=True, dueling_type='avg', target_model_update=1e-2)
elif (model_name == "SARSA"):
model = SARSAAgent(
model=model_structure, nb_actions=nb_actions, nb_steps_warmup=10, policy=policy)
elif (model_name == "DDPG"):
action_input, critic_layers = define_critic_layers(
env, num_of_hidden_layers=critic_hidden_layers)
random_process = define_random_process(nb_actions)
model = DDPGAgent(nb_actions=nb_actions, actor=model_structure, critic=critic_layers, critic_action_input=action_input,
memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
random_process=random_process, gamma=.99, target_model_update=1e-3)
model.compile(Adam(lr=1e-3), metrics=['mae'])
model.fit(env, nb_steps=steps, visualize=False, verbose=2)
model.save_weights('{}.h5f'.format(model_name), overwrite=True)
model.test(env, nb_episodes=test_steps, visualize=visualize)
def tf_keras_ddpg(env, saved_model_name, steps=50000):
num_states = env.observation_space.shape[0]
print("Size of State Space -> {}".format(num_states))
num_actions = env.action_space.shape[0]
print("Size of Action Space -> {}".format(num_actions))
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
print("Max Value of Action -> {}".format(upper_bound))
print("Min Value of Action -> {}".format(lower_bound))
def get_actor():
# Initialize weights between -3e-3 and 3-e3
last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(256, activation="relu")(inputs)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1, activation="tanh", kernel_initializer=last_init)(out)
# Our upper bound is 2.0 for Pendulum.
outputs = outputs * upper_bound
model = tf.keras.Model(inputs, outputs)
return model
def get_critic():
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(16, activation="relu")(state_input)
state_out = layers.Dense(32, activation="relu")(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(32, activation="relu")(action_input)
# Both are passed through seperate layer before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(256, activation="relu")(concat)
out = layers.Dense(256, activation="relu")(out)
outputs = layers.Dense(1)(out)
# Outputs single value for give state-action
model = tf.keras.Model([state_input, action_input], outputs)
return model
def policy(state, noise_object):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + noise
# We make sure action is within bounds
legal_action = np.clip(sampled_actions, lower_bound, upper_bound)
return [np.squeeze(legal_action)]
class OUActionNoise:
def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
self.theta = theta
self.mean = mean
self.std_dev = std_deviation
self.dt = dt
self.x_initial = x_initial
self.reset()
def __call__(self):
# Formula taken from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
x = (
self.x_prev
+ self.theta * (self.mean - self.x_prev) * self.dt
+ self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
)
# Store x into x_prev
# Makes next noise dependent on current one
self.x_prev = x
return x
def reset(self):
if self.x_initial is not None:
self.x_prev = self.x_initial
else:
self.x_prev = np.zeros_like(self.mean)
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
# Its tells us num of times record() was called.
self.buffer_counter = 0
# Instead of list of tuples as the exp.replay concept go
# We use different np.arrays for each tuple element
self.state_buffer = np.zeros((self.buffer_capacity, num_states))
self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
self.reward_buffer = np.zeros((self.buffer_capacity, 1))
self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
# Takes (s,a,r,s') obervation tuple as input
def record(self, obs_tuple):
# Set index to zero if buffer_capacity is exceeded,
# replacing old records
index = self.buffer_counter % self.buffer_capacity
self.state_buffer[index] = obs_tuple[0]
self.action_buffer[index] = obs_tuple[1]
self.reward_buffer[index] = obs_tuple[2]
self.next_state_buffer[index] = obs_tuple[3]
self.buffer_counter += 1
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
# Training and updating Actor & Critic networks.
# See Pseudo Code.
with tf.GradientTape() as tape:
target_actions = target_actor(next_state_batch, training=True)
y = reward_batch + gamma * target_critic(
[next_state_batch, target_actions], training=True
)
critic_value = critic_model([state_batch, action_batch], training=True)
critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
critic_optimizer.apply_gradients(
zip(critic_grad, critic_model.trainable_variables)
)
with tf.GradientTape() as tape:
actions = actor_model(state_batch, training=True)
critic_value = critic_model([state_batch, actions], training=True)
# Used `-value` as we want to maximize the value given
# by the critic for our actions
actor_loss = -tf.math.reduce_mean(critic_value)
actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
actor_optimizer.apply_gradients(
zip(actor_grad, actor_model.trainable_variables)
)
# We compute the loss and update parameters
def learn(self):
# Get sampling range
record_range = min(self.buffer_counter, self.buffer_capacity)
# Randomly sample indices
batch_indices = np.random.choice(record_range, self.batch_size)
# Convert to tensors
state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
reward_batch = tf.cast(reward_batch, dtype=tf.float32)
next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This update target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = steps
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# Takes about 4 min to train
for ep in range(total_episodes):
prev_state = env.reset()
episodic_reward = 0
while True:
# Uncomment this to see the Actor in action
# But not in a python notebook.
# env.render()
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
action = policy(tf_prev_state, ou_noise)
# Recieve state and reward from environment.
state, reward, done, info = env.step(action)
buffer.record((prev_state, action, reward, state))
episodic_reward += reward
buffer.learn()
update_target(target_actor.variables, actor_model.variables, tau)
update_target(target_critic.variables, critic_model.variables, tau)
# End this episode when `done` is True
if done:
break
prev_state = state
ep_reward_list.append(episodic_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
avg_reward_list.append(avg_reward)
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
plt.show()
# Save the weights
actor_model.save_weights("{}_actor.h5".format(saved_model_name))
critic_model.save_weights("{}_critic.h5".format(saved_model_name))
target_actor.save_weights("{}_target_actor.h5".format(saved_model_name))
target_critic.save_weights("{}_target_critic.h5".format(saved_model_name)) | AI-Starter | /AI-Starter-3.0.7.tar.gz/AI-Starter-3.0.7/dxc/rl/rl_helper_function/helper_function.py | helper_function.py |
import html2text
import os
import openai
import tiktoken
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
# Set OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_number_of_tokens(text,model="gpt-3.5-turbo"):
encoding = tiktoken.encoding_for_model(model)
return len(encoding.encode(text))
def split_text_logically(A: str):
"""Split the text logically by sections, not by characters. If there is only one section uses subsections and so on.
It works on markdown text. and it can be used progressively to split the text in smaller and smaller pieces as needed."""
half_len = len(A) // 2
for char in ['\n# ','\n## ','\n### ','\n#### ','\n##### ','\n###### ', '.\n', '\n', '.', ', ', ' ']:
index_left = A.rfind(char, 10, half_len+10)
index_right = A.find(char, half_len-10, len(A)-10)
if index_left == -1:
if index_right == -1:
continue
else:
split_indices=index_right
break
else:
if index_right == -1:
split_indices=index_left
break
else:
distance_left=half_len-index_left
distance_right=index_right-half_len
if distance_left<distance_right:
split_indices=index_left
break
else:
split_indices=index_right
break
part_a=A[:split_indices]
part_b=A[split_indices:]
return part_a, part_b
def _recursive_merge_summarises_new(summary_1: str, summary_2: str):
result="\n# \n"+summary_1+"\n# \n"+summary_2
return result
def _recursive_summarize_text(text: str, filter: str = None, summary_length: int = 500,model="gpt-3.5-turbo",max_number_tokens=4096):
if filter:
prompt_content = f"""I need you to summarise some text which is part of a bigger document.
You should summarise the text by filtering out what does not follows this guidelines:
{filter}.
The content is here:
{text}
Now create a summary. The result should not be longer than {summary_length} words.
Don't speak about the text, say directly what the text says.
Also do not add conclusions or morals to the summary. Stick to the facts.
Don't repeat yourself"""
else:
prompt_content = f"""I need you to summarise some text which is part of a bigger document.
The content is here:
{text}
Now create a summary. The result should not be longer than {summary_length} words.
Don't speak about the text, say directly what the text says.
Also do not add conclusions or morals to the summary. Stick to the facts.
Don't repeat yourself"""
n_tokens=get_number_of_tokens(prompt_content,model=model)
if n_tokens>max_number_tokens:
text_1,text_2=split_text_logically(text)
summary_1=_recursive_summarize_text(text_1, filter, summary_length=summary_length,model=model)
summary_2=_recursive_summarize_text(text_2, filter, summary_length=summary_length,model=model)
summary=_recursive_merge_summarises_new(summary_1, summary_2)
return summary
try:
response = openai.ChatCompletion.create(
model=model,
messages=[ { "role": "user", "content": prompt_content}]
)
except Exception as e:
print (e)
input("Press any key to continue")
return _recursive_summarize_text(text, filter, summary_length,model=model)
finish_reason=response.choices[0].finish_reason
if finish_reason == "length":
text_1,text_2=split_text_logically(text)
summary_1=_recursive_summarize_text(text_1, filter, summary_length=summary_length,model=model)
summary_2=_recursive_summarize_text(text_2, filter, summary_length=summary_length,model=model)
summary=_recursive_merge_summarises_new(summary_1, summary_2)
return summary
summary = response.choices[0].message.content
return summary
def html_to_markdown(html_content,ignore_links=True):
html_converter = html2text.HTML2Text()
html_converter.ignore_links = ignore_links
html_converter.ignore_images = True
markdown_text = html_converter.handle(html_content)
return markdown_text
def summarize_text(text:str, filter:str=None, summary_length:int=500,model="gpt-3.5-turbo",max_number_tokens=4096):
markdown_content = html_to_markdown(text)
summary=_recursive_summarize_text(markdown_content, filter, summary_length,model=model,max_number_tokens=max_number_tokens)
while (len(summary.split())>summary_length*1.5):
summary=_recursive_summarize_text(summary, filter,summary_length,model=model)
return summary | AI-Summarizer | /AI_Summarizer-1.0.4-py3-none-any.whl/ai_summarizer/main.py | main.py |
import pandas as pd
import sys
import qdarkstyle
import sqlalchemy
import textwrap
from PySide2.QtCore import (QAbstractTableModel, QModelIndex, Slot, QPersistentModelIndex, Qt)
from PySide2.QtWidgets import (QDesktopWidget, QAction, QApplication, QHBoxLayout, QHeaderView, QMainWindow,
QSizePolicy, QTableView, QWidget, QCheckBox, QTableWidgetItem)
from PySide2.QtGui import QFont
sys.path.append('../')
from configs.config import filenames
final_orders = filenames.final_orders
def read_sql_server():
engine = sqlalchemy.create_engine(
"mssql+pyodbc://altius:[email protected]/DemoDatasets?driver=SQL+Server")
query = textwrap.dedent('''
SELECT *
FROM orders''')
df = pd.read_sql_query(query, engine)
order = df["OrderID"]
itemid = df["ItemID"]
table = df["TableNo"]
item = df["Item"]
quantity = df["Quantity"]
number = df["Number"]
time = df["Time"]
return order, itemid,table, item, quantity, number, time
# def read_data(fname):
# df = pd.read_csv(fname)
# table = df["Table"]
# order = df["Order"]
# time = df["Time"]
# return table, order, time
class MainWindow(QMainWindow):
def __init__(self, widget):
super().__init__()
#QMainWindow.__init__(self)
self.setWindowTitle("AI Waiter Orders")
# Menu
self.menu = self.menuBar()
self.file_menu = self.menu.addMenu("File")
# Exit QAction
exit_action = QAction("Exit", self)
exit_action.setShortcut("Ctrl+Q")
exit_action.triggered.connect(self.close)
self.file_menu.addAction(exit_action)
# Status Bar
self.status = self.statusBar()
self.status.showMessage("Orders")
# Window dimensions
# geometry = QDesktopWidget().availableGeometry(self)
# self.setFixedSize(geometry.width() * 1, geometry.height() * 1)
self.setGeometry(0, 0, 1550, 800)
self.setCentralWidget(widget)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
#Font
self.setFont(QFont('SansSerif', 14))
# @Slot()
# def exit_app(self, checked):
# sys.exit()
class CustomTableModel(QAbstractTableModel):
def __init__(self, data=None):
QAbstractTableModel.__init__(self)
self.load_data(data)
self.checks = {}
def checkState(self, index):
if index in self.checks.keys():
return self.checks[index]
else:
return Qt.Unchecked
def load_data(self, data):
self.input_order = data[0].values
self.input_itemid = data[1].values
self.input_table = data[2].values
self.input_item = data[3].values
self.input_quantity = data[4].values
self.input_number = data[5].values
self.input_time = data[6].values
self.column_count = 8
self.row_count = len(self.input_order)
def rowCount(self, parent=QModelIndex()):
return self.row_count
def columnCount(self, parent=QModelIndex()):
return self.column_count
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return ("Served", "OrderID", "ItemID", "TableNo", "Item", "Quantity", "Number", "Time")[section]
else:
return "{}".format(section)
def data(self, index, role = Qt.DisplayRole):
column = index.column()
row = index.row()
if role == Qt.DisplayRole or role == Qt.EditRole:
if column == 1:
raw_date = self.input_order[row]
date = "{}".format(raw_date)
return date
elif column == 2:
return "{}".format(self.input_itemid[row])
elif column == 3:
return "{}".format(self.input_table[row])
elif column == 4:
return "{}".format(self.input_item[row])
elif column == 5:
return "{}".format(self.input_quantity[row])
elif column == 6:
return "{}".format(self.input_number[row])
elif column == 7:
return "{}".format(self.input_time[row])
else:
return QCheckBox('')
elif role == Qt.CheckStateRole and column == 0:
return self.checkState(QPersistentModelIndex(index))
elif role == Qt.TextAlignmentRole:
return Qt.AlignCenter
return None
def setData(self, index, value, role=Qt.EditRole):
if not index.isValid():
return False
if role == Qt.CheckStateRole:
self.checks[QPersistentModelIndex(index)] = value
return True
return False
def flags(self, index):
fl = QAbstractTableModel.flags(self, index)
if index.column() == 0:
fl |= Qt.ItemIsEditable | Qt.ItemIsUserCheckable
return fl
class Widget(QWidget):
def __init__(self, data):
super().__init__()
#QWidget.__init__(self)
# Getting the Model
self.model = CustomTableModel(data)
# Creating a QTableView
self.table_view = QTableView()
self.table_view.setModel(self.model)
# QTableView Headers
self.horizontal_header = self.table_view.horizontalHeader()
self.vertical_header = self.table_view.verticalHeader()
self.horizontal_header.setSectionResizeMode(0,QHeaderView.ResizeToContents)
self.horizontal_header.setSectionResizeMode(1,QHeaderView.ResizeToContents)
self.horizontal_header.setSectionResizeMode(2,QHeaderView.ResizeToContents)
self.horizontal_header.setSectionResizeMode(3,QHeaderView.ResizeToContents)
self.horizontal_header.setSectionResizeMode(4,QHeaderView.Stretch)
self.horizontal_header.setSectionResizeMode(5,QHeaderView.ResizeToContents)
self.horizontal_header.setSectionResizeMode(6,QHeaderView.ResizeToContents)
self.horizontal_header.setSectionResizeMode(7,QHeaderView.ResizeToContents)
self.vertical_header.setSectionResizeMode(QHeaderView.Interactive)
self.horizontal_header.setStyleSheet("QHeaderView { font-size: 18pt; font: bold 24px;}")
# QWidget Layout
self.main_layout = QHBoxLayout()
size = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.main_layout.setContentsMargins(0,0,0,0)
#Align Checkbox
self.main_layout.setContentsMargins(0,0,0,0)
## Left layout
size.setHorizontalStretch(1)
self.table_view.setSizePolicy(size)
self.main_layout.addWidget(self.table_view)
# Set the layout to the QWidget
self.setLayout(self.main_layout)
def main(final_orders):
data = read_sql_server()
#app2 = QApplication()
widget = Widget(data)
window2 = MainWindow(widget)
#app.setStyleSheet(qdarkstyle.load_stylesheet_pyside2())
#window2.show()
#sys.exit(app2.exec_())
return window2
if __name__ == "__main__":
data = read_sql_server()
# Qt Application
app = QApplication()
# QWidget
widget = Widget(data)
# QMainWindow using QWidget as central widget
window = MainWindow(widget)
# setup stylesheet
app.setStyleSheet(qdarkstyle.load_stylesheet_pyside2())
window.show()
sys.exit(app.exec_()) | AI-waiter | /AI_waiter-1.0.3.tar.gz/AI_waiter-1.0.3/src/gui/display_window2.py | display_window2.py |
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer, LancasterStemmer
"""
TODO: return most likely specific quantity to make sure that
the term with the highest number of tokens is returned.
clean speech text, so that e.g. fritz-kola -> fritz kola
"""
def clean_text(text):
text = str(text).lower() #lowercase
text = re.sub('\'s', 's', text)
text = re.sub('\’s', 's', text)
text = re.sub('\W', ' ', text) #remove punctuation
text = re.sub('\d+','', text) #remove digits
text = re.sub('\s+', ' ', text) #remove whitespace
text = text.strip(' ')
return text
def remove_stopwords(text):
stop_words = stopwords.words('english')
# add words to stopwords
add_words = ['de', 'la', 'order', 'could']
stop_words += add_words
word_tokens = word_tokenize(text)
filtered = [w for w in word_tokens if not w in stop_words]
return ' '.join([w for w in filtered])
def lemmatize_text(text):
# Lemmatizing
lemmatizer = WordNetLemmatizer()
lemmatized_output = ' '.join([lemmatizer.lemmatize(w) for w in text.split()])
#Stemming
#stemmer = LancasterStemmer()
#stemmed_words = [stemmer.stem(word) for word in text]
#text = " ".join(stemmed_words)
return lemmatized_output
def get_quantities(menu):
"""
separate items in quantity columns into a list
"""
new_quant = []
for q in menu['Quantities']:
try:
new_quant.append(q.split(', '))
except:
new_quant.append([])
menu['Quantities'] = new_quant
return menu
def main(menu_file):
"""
menu file is a csv file containing the menu items
"""
data = pd.read_csv(menu_file, encoding = 'utf-8')
data.rename(columns={data.columns[1]: "Specific term"}, inplace=True)
data["Further details"] = data["Further details"].apply(clean_text)
data = get_quantities(data)
data['Quantities'] = data['Quantities'].apply(clean_text)
data["General term"] = data["General term"].apply(clean_text)
data["Specific term"] = data["Specific term"].apply(clean_text)
data["description_tokens"] = data["Further details"].apply(word_tokenize)
#data["quantities_tokens"] = data["Quantities"].apply(word_tokenize)
data["general_tokens"] = data["General term"].apply(word_tokenize)
data["specific_tokens"] = data["Specific term"].apply(word_tokenize)
data["all_tokens"] = data["specific_tokens"] + data["general_tokens"] + data["description_tokens"]
data.to_csv('menu_data.csv')
return data
if __name__ == "__main__":
menu_file = ('../menus/menu.csv')
tokenized_data = main(menu_file)
print(tokenized_data.head()) | AI-waiter | /AI_waiter-1.0.3.tar.gz/AI_waiter-1.0.3/src/convert_text/get_menu_items.py | get_menu_items.py |
from nltk import word_tokenize
from statistics import mode
import sys
sys.path.append('../../')
from src.convert_text.get_menu_items import lemmatize_text, clean_text, remove_stopwords
from src.configs.config import filenames
import textwrap
import json
import ast
from collections import Counter
import pandas as pd
import random
import nltk
import sqlalchemy
import random
from datetime import datetime
import urllib3, time
import requests
import json
from random import randint
#from word2number import w2n
import sqlalchemy
def clean_speech(text):
"""
Cleans the speech to text conversione
Args:
text : string of text converted from speech
Returns:
new_text : list of strings
"""
new_text = clean_text(text)
return lemmatize_text(new_text)
def search_for_general_item(text, menu):
"""
search the cleaned text for an item on the menu
Args:
text : speech converted into text
menu : menu data from csv file
Return:
item_idx (int): possible indices of menu item
x : corresponding menu item
"""
# is the general term in the order?
general_terms = menu['General term'].unique()
possible_items = []
for w in text:
possible_items = possible_items + [x for x in general_terms if w in x]
# count most frequently occuring item
if possible_items != []:
try:
item = mode(possible_items)
except:
print("more than one possible item")
else:
item = []
return item
def search_for_specific_item(text, menu):
"""
matches the indices for the general item to a specific item
Args:
text : speech converted to text
menu : menu dataframe
idx : list of possible general items
TODO: need to improve so it matches up exact word for word
"""
specific_items = menu['Specific term'].apply(lambda x: remove_stopwords(x))
print(specific_items)
possible_items = []
print('text = ',text)
for w in text:
possible_items += [x for x in specific_items if w in x]
if possible_items == []:
print('no match to specific item found')
return []
else:
try:
return mode(possible_items)
except:
print("More than one possible item")
print(possible_items)
return []
def match_item_to_order(text, menu):
"""
instead of seraching for an item in the text, search through all the items
to see if they appear in the text
"""
# clean specific items
menu['Specific term'] = menu['Specific term'].apply(lambda x: remove_stopwords(x))
text = word_tokenize(remove_stopwords(' '.join(text)))
possible_items = []
count_terms = []
for term in menu['Specific term']:
count = 0
for token in word_tokenize(term):
if any(token == w for w in text):
count +=1
if count != 0:
possible_items.append(term)
count_terms.append(count)
if possible_items == []:
return []
else:
try:
return mode(possible_items)
except:
print('count_terms = ',count_terms)
print('possible_items = ', possible_items)
idx = count_terms.index(max(count_terms))
for i in range(len(count_terms)):
if i != idx:
if count_terms[i] == max(count_terms):
return 'not sure'
return possible_items[idx]
def search_for_quantity(text,menu, item_index):
# TODO : find numeric values
quantity_list = menu['Quantities'].loc[item_index]
quantity = ([w for w in text if w in quantity_list])
return quantity
def search_for_number(text):
pos = nltk.pos_tag(text)
for i in range(len(pos)):
word, pos_tag = pos[i]
if pos_tag == 'CD':
return word
else:
return 'one'
def get_alternative_text(audio, recognizer):
alt_dict = recognizer.recognize_google(audio, show_all = True)
alternatives = alt_dict['alternative']
transcript = []
for i in alternatives:
transcript.append(i['transcript'])
return transcript
def final_order(order_data, timestamp):
"""
determine final order, as most likely translation
"""
max_items = max(order_data['sentence_id'])
order_itemid = []
order_table = []
order_items = []
order_quantity = []
order_timestamp = []
order_number = []
# iterate through item orders
for i in range(max_items + 1):
# find item
item_list = order_data[order_data['sentence_id'] == i]['item'].values
# if all items are the same, choice is obvious
if all(x == item_list[0] for x in item_list):
item = item_list[0]
else:
# TODO : find most commonly occuring item
try:
item = mode(item_list)
if item == 'not sure':
item = mode(item_list[item_list != 'not sure'])
except:
item = item_list[0]
# find quantity
quantity_list = order_data[order_data['sentence_id'] == i]['quantity'].values
try:
quantity_list = [q[0] for q in quantity_list]
if all(x == quantity_list[0] for x in quantity_list):
quantity = quantity_list[0]
else:
try:
quantity = mode(quantity_list)
except:
quantity = quantity_list[0]
except:
quantity = 'missing quantity'
# find number
number_list = order_data[order_data['sentence_id'] == i]['number'].values
try:
if all(x == number_list[0] for x in number_list):
number = number_list[0]
else:
number = float('NaN')
except:
number = float('NaN')
order_itemid.append(random.randint(1000,2000))
order_items.append(item)
order_quantity.append(quantity)
order_timestamp.append(timestamp)
order_table.append(0)
order_number.append(number)
# TODO : what if not all items/quantites are the same? How to pick most likely order
#Counts how many times the script has ran to give a unique item ID
def get_var_value(filename="varstore.dat"):
with open(filename, "a+") as f:
f.seek(0)
val = int(f.read() or 0) + 1
f.seek(0)
f.truncate()
f.write(str(val))
return val
your_counter = get_var_value()
final_order_dict = {'OrderID': your_counter
,'TableNo': random.randint(1,25)
,'ItemID': order_itemid
,'Item': order_items
,'Quantity': order_quantity
,'Number': order_number
,'Time': order_timestamp}
final_order_data = pd.DataFrame(final_order_dict)
print(final_order_data)
return final_order_data
def main(audio, timestamp, recognizer, menu):
"""
Args:
audio : output from SpeechConvert()
recognizer : speech recognition class
menu : dataframe containing menu information
returns
command
"""
# get text and clean - look for alternatives
speech_list = get_alternative_text(audio, recognizer)
#speech_text = 'could i get a large glass of pinot grigio and a small glass of sauvignon blanc'
order_list = []
sentence_list = []
item_list = []
order_id = []
sentence_id = []
quantity_list = []
number_list = []
for i, speech_text in enumerate(speech_list):
# split order into sentences with the word 'and'
print('whole order = ',speech_text)
print("")
order = []
speech = speech_text.split('and')
for j, sentence in enumerate(speech):
print('sentence =', sentence)
new_text = clean_speech(sentence)
new_text = remove_stopwords(new_text)
if len(new_text) == 1:
print("only one word found")
sys.exit()
new_text = word_tokenize(new_text)
# try and find specific item
item_name = match_item_to_order(new_text, menu)
# if specific item found, search for quantity
if item_name != []:
try:
item_index = menu.loc[menu['Specific term'] == item_name].index[0] # if multiple items
quantity = search_for_quantity(new_text, menu, item_index)
except:
quantity = 'item not clear'
else:
# if specific item not found perhaps ordering a general item?
general_item = search_for_general_item(new_text, menu)
number = search_for_number(new_text)
print('Item: ', item_name, '. Quantity: ', quantity)
print("")
order.append([item_name, quantity])
order_list.append(speech_text)
sentence_list.append(sentence)
item_list.append(item_name)
sentence_id.append(j)
order_id.append(i)
quantity_list.append(quantity)
number_list.append(number)
print("--------------------")
if order != []:
exit
# save order to file
save_dir = filenames.order_dir
filename = 'full_order.csv'
dct = {'translation_id' : order_id
,'sentence_id' : sentence_id
,'order' : order_list
,'sentence' : sentence_list
,'item' : item_list
,'quantity': quantity_list
,'number' : number_list}
full_order_data = pd.DataFrame(dct)
full_order_data.to_csv(save_dir + filename)
order = final_order(full_order_data, timestamp)
order.to_csv(save_dir + 'final_order.csv', index = False)
###########################
#Save order to SQL table
engine = sqlalchemy.create_engine(
"mssql+pyodbc://altius:[email protected]/DemoDatasets?driver=SQL+Server")
order.to_sql(name='orders', con=engine, if_exists='append', index=False)
print('order saved')
return None | AI-waiter | /AI_waiter-1.0.3.tar.gz/AI_waiter-1.0.3/src/convert_text/text_to_command.py | text_to_command.py |
import tkinter
import tkinter as tk
import tkinter.messagebox
import pyaudio
import wave
import os
import sys
sys.path.append('../../')
from datetime import datetime
import keyboard
class RecAUD_space:
"""
Record audio interface.
Options to start, stop and cancel recording
It saves to file automatically when you stop recording.
Operates by using the keyboard as opposed to pressing buttons
"""
def __init__(self, file_name, chunk=3024, frmat=pyaudio.paInt16, channels=2, rate=44100, py=pyaudio.PyAudio()):
# Start Tkinter and set Title
# set to hit space to record and stop record
# esc to cancel?
# output table number, order, time ordered
# slide on what it's doing with confidence scores etc.
self.main = tkinter.Tk()
self.collections = []
self.main.geometry('300x150') # size of buttons
self.main.title('Record') # title of interface
self.CHUNK = chunk
self.FORMAT = frmat
self.CHANNELS = channels
self.RATE = rate
self.p = py
self.frames = []
self.st = 1 # start counter
self.stream = self.p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)
current_time = str(datetime.now())
current_time = "_".join(current_time.split()).replace(":","-")
self.current_time = current_time[:-7]
self.file_name = file_name
self.labelframe = tkinter.LabelFrame(self.main, text = "Could I take your order?")
self.labelframe.unbind("<space>")
self.labelframe.bind("<space>", self.start_record)
#self.labelframe.bind("<space>", self.stop)
self.labelframe.bind("<Escape>", self.cancel_record)
self.labelframe.pack(fill=tk.BOTH, expand = "yes")
self.label = tkinter.Label(self.labelframe
, text = "Press space bar to record, and again to stop.\n Press ESC to cancel.")
self.label.pack()
self.labelframe.focus_set()
# Set Frames
self.labelframe.mainloop()
def start_record(self, event):
"""
Start recording audio command
"""
self.st = 1
self.frames = []
stream = self.p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)
#print("recording your order")
while self.st == 1:
data = stream.read(self.CHUNK)
print("recording your order")
self.frames.append(data)
self.main.update()
if keyboard.is_pressed('space'):
#self.labelframe.unbind("<space>")
self.st = 0
stream.close()
wf = wave.open(self.file_name, 'wb')
wf.setnchannels(self.CHANNELS)
wf.setsampwidth(self.p.get_sample_size(self.FORMAT))
wf.setframerate(self.RATE)
wf.writeframes(b''.join(self.frames))
wf.close()
print('Written to file' , self.file_name, 'at time:' + self.current_time)
self.main.destroy()
# stream.close()
# wf = wave.open(self.file_name, 'wb')
# wf.setnchannels(self.CHANNELS)
# wf.setsampwidth(self.p.get_sample_size(self.FORMAT))
# wf.setframerate(self.RATE)
# wf.writeframes(b''.join(self.frames))
# wf.close()
# print('Written to file' , self.file_name, 'at time:' + self.current_time)
# self.main.destroy()
return None
def stop(self, event):
"""
Stop recording audio command
sets counter to 0
"""
self.st = 0
return None
def cancel_record(self, event):
"""
Cancels recording without saving
Stops execution
"""
sys.exit("Cancelled Recording")
return None
class RecAUD:
"""
Record audio interface.
Options to start, stop and cancel recording
It saves to file automatically when you stop recording.
"""
def __init__(self, file_name, chunk=3024, frmat=pyaudio.paInt16, channels=2, rate=44100, py=pyaudio.PyAudio()):
# Start Tkinter and set Title
# set to hit space to record and stop record
# esc to cancel?
# output table number, order, time ordered
# slide on what it's doing with confidence scores etc.
# set to black
# output time
# save to csv
self.main = tkinter.Tk()
self.collections = []
self.main.geometry('300x150') # size of buttons
self.main.title('Record') # title of interface
self.CHUNK = chunk
self.FORMAT = frmat
self.CHANNELS = channels
self.RATE = rate
self.p = py
self.frames = []
self.st = 1 # start counter
self.stream = self.p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)
current_time = str(datetime.now())
current_time = "_".join(current_time.split()).replace(":","-")
self.current_time = current_time[:-7]
self.file_name = file_name
# Set Frames
self.buttons = tkinter.Frame(self.main, padx=20, pady=20)
# Pack Frame
self.buttons.pack(fill=tk.BOTH)
# Start, Stop and Cancel buttons
self.strt_rec = tkinter.Button(self.buttons, width=10, padx=10, pady=5,
text='Start Recording', command=lambda: self.start_record())
self.strt_rec.grid(row=0, column=0, padx=5, pady=5)
self.stop_rec = tkinter.Button(self.buttons, width=10, padx=10, pady=5,
text='Stop Recording', command=lambda: self.stop())
self.stop_rec.grid(row=0, column=1, columnspan=1, padx=5, pady=5)
self.cancel_rec = tkinter.Button(self.buttons, width=20, padx=0, pady=5,
text='Cancel', command=lambda: self.cancel_record())
self.cancel_rec.grid(row=1, column=0, columnspan=2, padx=50, pady=5)
tkinter.mainloop()
def start_record(self):
"""
Start recording audio command
"""
self.st = 1
self.frames = []
stream = self.p.open(format=self.FORMAT, channels=self.CHANNELS, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)
while self.st == 1:
data = stream.read(self.CHUNK)
self.frames.append(data)
print("* recording")
self.main.update()
stream.close()
wf = wave.open(self.file_name, 'wb')
wf.setnchannels(self.CHANNELS)
wf.setsampwidth(self.p.get_sample_size(self.FORMAT))
wf.setframerate(self.RATE)
wf.writeframes(b''.join(self.frames))
wf.close()
print('Written to file' , self.file_name, 'at time:' + self.current_time)
self.main.destroy()
return None
def stop(self):
"""
Stop recording audio command
sets counter to 0
"""
self.st = 0
return None
def cancel_record(self):
"""
Cancels recording without saving
Stops execution
"""
sys.exit("Cancelled Recording")
return None
if __name__ == "__main__":
# Create an object of the ProgramGUI class to begin the program.
guiAUD = RecAUD_space('audio.wav') | AI-waiter | /AI_waiter-1.0.3.tar.gz/AI_waiter-1.0.3/src/convert_speech/record_audio.py | record_audio.py |
import qdarkstyle
import sys
from PySide2 import QtWidgets as qtw
from PySide2 import QtGui as qtg
from PySide2 import QtCore as qtc
from PySide2 import QtMultimedia as qtmm
sys.path.append('../../')
from src.configs.config import filenames
from datetime import datetime
import time
audiofile2 = filenames.audio_file2
class MainWindow(qtw.QMainWindow):
closed = qtc.Signal()
def __init__(self):
"""
MainWindow constructor:
Code in this method should define window properties, create backend resources, etc.
"""
super().__init__()
self.setWindowTitle("Record Order")
#Create Window layout with a sound widget
soundboard = qtw.QWidget()
soundboard.setLayout(qtw.QGridLayout())
self.setCentralWidget(soundboard)
sw = SoundWidget()
soundboard.layout().addWidget(sw)
# Status Bar
self.status = self.statusBar()
self.status.showMessage("AI Waiter")
#Window Dimensions
self.setSizePolicy(qtw.QSizePolicy.Expanding, qtw.QSizePolicy.MinimumExpanding)
# Code ends here
#self.show()
# @qtc.Slot()
# def exit_app(self, checked):
# sys.exit()
sw.sendorder_button.clicked.connect(self.close)
def closeEvent(self, event):
self.closed.emit()
super().closeEvent(event)
class PlayButton(qtw.QPushButton):
play_stylesheet = 'color: white;'
stop_stylesheet = 'background-color: darkred; color: white;'
def __init__(self):
super().__init__('Play')
self.setStyleSheet(self.play_stylesheet)
def on_state_changed(self, state):
if state == qtmm.QMediaPlayer.PlayingState:
self.setStyleSheet(self.stop_stylesheet)
self.setText('Stop')
else:
self.setStyleSheet(self.play_stylesheet)
self.setText('Play')
class RecordButton(qtw.QPushButton):
record_stylesheet = 'background-color: green; color: white;'
stop_stylesheet = 'background-color: darkred; color: white;'
def __init__(self):
super().__init__('Record')
self.setFont(qtg.QFont('Sans', 14, qtg.QFont.Bold))
self.setStyleSheet(self.record_stylesheet)
self.setSizePolicy(qtw.QSizePolicy.Expanding, qtw.QSizePolicy.Expanding)
def on_state_changed(self, state):
if state == qtmm.QAudioRecorder.RecordingState:
self.setStyleSheet(self.stop_stylesheet)
self.setText('Stop')
else:
self.setStyleSheet(self.record_stylesheet)
self.setText('Record')
class SendOrderButton(qtw.QPushButton):
button_stylesheet = 'background-color: blue; color: white;'
def __init__(self):
super().__init__('Send Order')
self.setFont(qtg.QFont('Sans', 14, qtg.QFont.Bold))
self.setSizePolicy(qtw.QSizePolicy.Expanding, qtw.QSizePolicy.Expanding)
self.setStyleSheet(self.button_stylesheet)
#self.clicked.connect(self.close)
def press_button(self):
if self.isEnabled():
self.setEnabled(False)
self.setText('Send Order')
else:
self.setEnabled(True)
self.setText('Sent')
class SoundWidget(qtw.QWidget):
def __init__(self):
super().__init__()
self.setLayout(qtw.QGridLayout())
# Title
self.label = qtw.QLabel("May I take your order?")
self.label.setFont(qtg.QFont('Sans', 18, qtg.QFont.Bold))
self.layout().addWidget(self.label, 0, 0, 1, 2)
# Playback Subtitle
self.label = qtw.QLabel("No Recording yet")
self.layout().addWidget(self.label, 1, 0, 1, 2)
#Play Button
self.play_button = PlayButton()
self.layout().addWidget(self.play_button, 3, 1, 1, 1)
self.player = qtmm.QMediaPlayer()
self.play_button.clicked.connect(self.on_playbutton)
self.player.stateChanged.connect(self.play_button.on_state_changed)
# Loading files Button
self.file_button = qtw.QPushButton('Load File', clicked=self.get_file)
self.layout().addWidget(self.file_button, 3, 0, 1, 1)
# Slider
self.position = qtw.QSlider(minimum=0, orientation=qtc.Qt.Horizontal)
self.layout().addWidget(self.position, 2, 0, 1, 2)
self.player.positionChanged.connect(self.position.setSliderPosition)
self.player.durationChanged.connect(self.position.setMaximum)
self.position.sliderMoved.connect(self.player.setPosition)
# Volume
# self.volume = qtw.QSlider(minimum=0,maximum=100,sliderPosition=10,
# orientation=qtc.Qt.Horizontal,
# #sliderMoved=self.player.setVolume
# )
# self.layout().addWidget(self.volume, 2, 0)
# Recording
self.recorder = qtmm.QAudioRecorder()
# supported audio inputs
print(self.recorder.audioInputs())
self.recorder.setAudioInput('default:')
#Overriding sound recording settings
settings = qtmm.QAudioEncoderSettings()
settings.setCodec('audio/pcm')
settings.setSampleRate(44100)
settings.setQuality(qtmm.QMultimedia.HighQuality)
self.recorder.setEncodingSettings(settings)
self.recorder.setContainerFormat('audio/x-wav')
self.recorder.setOutputLocation(qtc.QUrl.fromLocalFile(audiofile2))
#Record Button
self.record_button = RecordButton()
self.layout().addWidget(self.record_button, 4, 0, 1, 2)
self.recorder.stateChanged.connect(self.record_button.on_state_changed)
self.record_button.clicked.connect(self.on_recordbutton)
self.shortcut = qtw.QShortcut(qtg.QKeySequence("Space"), self)
self.shortcut.activated.connect(self.on_recordbutton)
#Send Order Button
self.sendorder_button = SendOrderButton()
self.sendorder_button.setShortcut(qtg.QKeySequence('Tab'))
self.layout().addWidget(self.sendorder_button, 5, 0, 1, 2)
#self.sendorder_button.clicked.connect(qtc.QCoreApplication.instance().quit)
#self.sendorder_button.clicked.connect(qtc.QCoreApplication.exit(0))
def on_playbutton(self):
if self.player.state() == qtmm.QMediaPlayer.PlayingState:
self.player.stop()
else:
self.player.play()
def set_file(self, url):
self.label.setText(url.fileName())
if url.scheme() == '':
url.setScheme('file')
content = qtmm.QMediaContent(url)
self.player.setMedia(content)
def get_file(self):
fn, _ = qtw.QFileDialog.getOpenFileUrl(
self,
"Select File",
qtc.QDir.homePath(),
"Audio files (*.wav *.flac *.mp3 *.ogg *.aiff);; All files (*)"
)
if fn:
self.set_file(fn)
def on_recordbutton(self):
if self.recorder.state() == qtmm.QMediaRecorder.RecordingState:
self.recorder.stop()
url = self.recorder.actualLocation()
self.set_file(url)
else:
self.recorder.record()
def main():
#app = qtw.QApplication(sys.argv)
window = MainWindow()
timestamp = time.strftime('%H:%M:%S')
#app.setStyleSheet(qdarkstyle.load_stylesheet_pyside2())
#window.show()
#app.exec_()
return window, timestamp
if __name__ == '__main__':
import sys
app = qtw.QApplication(sys.argv)
window = MainWindow()
app.setStyleSheet(qdarkstyle.load_stylesheet_pyside2())
window.show()
sys.exit(app.exec_()) | AI-waiter | /AI_waiter-1.0.3.tar.gz/AI_waiter-1.0.3/src/convert_speech/record_audio2.py | record_audio2.py |
import os as _os
import shlex as _shlex
import contextlib as _contextlib
import sys as _sys
import operator as _operator
import itertools as _itertools
import warnings as _warnings
import pkg_resources
import setuptools.command.test as orig
from setuptools import Distribution
@_contextlib.contextmanager
def _save_argv(repl=None):
saved = _sys.argv[:]
if repl is not None:
_sys.argv[:] = repl
try:
yield saved
finally:
_sys.argv[:] = saved
class CustomizedDist(Distribution):
allow_hosts = None
index_url = None
def fetch_build_egg(self, req):
"""Specialized version of Distribution.fetch_build_egg
that respects respects allow_hosts and index_url."""
from setuptools.command.easy_install import easy_install
dist = Distribution({'script_args': ['easy_install']})
dist.parse_config_files()
opts = dist.get_option_dict('easy_install')
keep = (
'find_links',
'site_dirs',
'index_url',
'optimize',
'site_dirs',
'allow_hosts',
)
for key in list(opts):
if key not in keep:
del opts[key] # don't use any other settings
if self.dependency_links:
links = self.dependency_links[:]
if 'find_links' in opts:
links = opts['find_links'][1].split() + links
opts['find_links'] = ('setup', links)
if self.allow_hosts:
opts['allow_hosts'] = ('test', self.allow_hosts)
if self.index_url:
opts['index_url'] = ('test', self.index_url)
install_dir_func = getattr(self, 'get_egg_cache_dir', _os.getcwd)
install_dir = install_dir_func()
cmd = easy_install(
dist,
args=["x"],
install_dir=install_dir,
exclude_scripts=True,
always_copy=False,
build_directory=None,
editable=False,
upgrade=False,
multi_version=True,
no_report=True,
user=False,
)
cmd.ensure_finalized()
return cmd.easy_install(req)
class PyTest(orig.test):
"""
>>> import setuptools
>>> dist = setuptools.Distribution()
>>> cmd = PyTest(dist)
"""
user_options = [
('extras', None, "Install (all) setuptools extras when running tests"),
(
'index-url=',
None,
"Specify an index url from which to retrieve dependencies",
),
(
'allow-hosts=',
None,
"Whitelist of comma-separated hosts to allow "
"when retrieving dependencies",
),
(
'addopts=',
None,
"Additional options to be passed verbatim to the pytest runner",
),
]
def initialize_options(self):
self.extras = False
self.index_url = None
self.allow_hosts = None
self.addopts = []
self.ensure_setuptools_version()
@staticmethod
def ensure_setuptools_version():
"""
Due to the fact that pytest-runner is often required (via
setup-requires directive) by toolchains that never invoke
it (i.e. they're only installing the package, not testing it),
instead of declaring the dependency in the package
metadata, assert the requirement at run time.
"""
pkg_resources.require('setuptools>=27.3')
def finalize_options(self):
if self.addopts:
self.addopts = _shlex.split(self.addopts)
@staticmethod
def marker_passes(marker):
"""
Given an environment marker, return True if the marker is valid
and matches this environment.
"""
return (
not marker
or not pkg_resources.invalid_marker(marker)
and pkg_resources.evaluate_marker(marker)
)
def install_dists(self, dist):
"""
Extend install_dists to include extras support
"""
return _itertools.chain(
orig.test.install_dists(dist), self.install_extra_dists(dist)
)
def install_extra_dists(self, dist):
"""
Install extras that are indicated by markers or
install all extras if '--extras' is indicated.
"""
extras_require = dist.extras_require or {}
spec_extras = (
(spec.partition(':'), reqs) for spec, reqs in extras_require.items()
)
matching_extras = (
reqs
for (name, sep, marker), reqs in spec_extras
# include unnamed extras or all if self.extras indicated
if (not name or self.extras)
# never include extras that fail to pass marker eval
and self.marker_passes(marker)
)
results = list(map(dist.fetch_build_eggs, matching_extras))
return _itertools.chain.from_iterable(results)
@staticmethod
def _warn_old_setuptools():
msg = (
"pytest-runner will stop working on this version of setuptools; "
"please upgrade to setuptools 30.4 or later or pin to "
"pytest-runner < 5."
)
ver_str = pkg_resources.get_distribution('setuptools').version
ver = pkg_resources.parse_version(ver_str)
if ver < pkg_resources.parse_version('30.4'):
_warnings.warn(msg)
def run(self):
"""
Override run to ensure requirements are available in this session (but
don't install them anywhere).
"""
self._warn_old_setuptools()
dist = CustomizedDist()
for attr in 'allow_hosts index_url'.split():
setattr(dist, attr, getattr(self, attr))
for attr in (
'dependency_links install_requires tests_require extras_require '
).split():
setattr(dist, attr, getattr(self.distribution, attr))
installed_dists = self.install_dists(dist)
if self.dry_run:
self.announce('skipping tests (dry run)')
return
paths = map(_operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
return self.run_tests()
@property
def _argv(self):
return ['pytest'] + self.addopts
def run_tests(self):
"""
Invoke pytest, replacing argv. Return result code.
"""
with _save_argv(_sys.argv[:1] + self.addopts):
result_code = __import__('pytest').main()
if result_code:
raise SystemExit(result_code) | AI4SS | /AI4SS-0.0.1.tar.gz/AI4SS-0.0.1/.eggs/pytest_runner-6.0.0-py3.8.egg/ptr/__init__.py | __init__.py |
# AI4Water
[](https://github.com/AtrCheema/AI4Water/actions)
[](https://ai4water.readthedocs.io/en/latest/?badge=latest)
[](https://doi.org/10.5194/gmd-15-3021-2022)
[](https://pepy.tech/project/ai4water)
[](https://badge.fury.io/py/AI4Water)


A uniform and simplified framework for rapid experimentation with deep learning and machine learning based models
for time series and tabular data. To put it in Andrej Karpathy's [words](https://twitter.com/karpathy/status/1350503355299205120)
`Because deep learning is so empirical, success in it is to a large extent proportional to raw experimental throughput,
the ability to babysit a large number of experiments at once, staring at plots and tweaking/re-launching what works.
This is necessary, but not sufficient.`
The specific purposes of the repository are
- complement the functionality of `keras`/`pytorch`/`sklearn` by making pre- and
post-processing easier for time-series prediction/classification problems (this also holds
true for any tabular data).
- save, load/reload or build models from a readable json file (see the sketch after this list). This repository
provides a framework to build layered models using a python dictionary, together with
several helper tools that speed up the process of modeling time-series forecasting.
- provide a uniform interface for optimizing hyper-parameters using
[skopt](https://scikit-optimize.github.io/stable/index.html);
[sklearn](https://scikit-learn.org/stable/modules/classes.html#hyper-parameter-optimizers)
based [grid](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
and [random](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html);
[hyperopt](http://hyperopt.github.io/hyperopt/) based
[tpe](https://papers.nips.cc/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf),
[atpe](https://www.electricbrain.io/blog/learning-to-optimize) or
[optuna](https://optuna.readthedocs.io/en/stable/) based
[tpe](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.samplers.TPESampler.html),
[cmaes](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.samplers.CmaEsSampler.html) etc.
See [example](https://github.com/AtrCheema/AI4Water/blob/master/examples/hyper_para_opt.ipynb)
using its application.
- cut short the time to write boilerplate code in developing machine learning
based models.
- It should be possible to overwrite/customize any of the functionality of the AI4Water's `Model`
by subclassing the
`Model`. So at the highest level you just need to initiate the `Model`, and then need `fit`, `predict` and
`view_model` methods of `Model` class, but you can go as low as you could go with tensorflow/keras.
- All the above functionalities should be available without complicating keras
implementation.
## Installation
An easy way to install ai4water is using pip
pip install ai4water
You can also use GitHub link
python -m pip install git+https://github.com/AtrCheema/AI4Water.git
or using setup file, go to folder where repo is downloaded
python setup.py install
The latest code however (possibly with fewer bugs and more features) can be installed from `dev` branch instead
python -m pip install git+https://github.com/AtrCheema/AI4Water.git@dev
To install the latest branch (`dev`) with all requirements use the following command
python -m pip install "AI4Water[all] @ git+https://github.com/AtrCheema/AI4Water.git@dev"
### installation options
`all` keyword will install all the dependencies. You can choose the dependencies of particular sub-module
by using the specific keyword. Following keywords are available
- `hpo` if you want hyperparameter optimization
- `post_process` if you want postprocessing
- `exp` for experiments sub-module
## Sub-modules
AI4Water consists of several submodules, each of wich responsible for a specific tasks.
The modules are also liked with each other. For understanding sub-module structure of
ai4water, [see this article](https://ai4water.readthedocs.io/en/dev/understanding.html)
<p float="left">
<img src="/docs/source/imgs/architecture.png" width="800" height="700"/>
</p>
## How to use
Build a `Model` by providing all the arguments to initiate it.
```python
from ai4water import Model
from ai4water.models import MLP
from ai4water.datasets import mg_photodegradation
data, *_ = mg_photodegradation(encoding="le")
model = Model(
# define the model/algorithm
model=MLP(units=24, activation="relu", dropout=0.2),
# columns in data file to be used as input
input_features=data.columns.tolist()[0:-1],
# columns in csv file to be used as output
output_features=data.columns.tolist()[-1:],
lr=0.001, # learning rate
batch_size=8, # batch size
epochs=500, # number of epochs to train the neural network
patience=50, # used for early stopping
)
```
Train the model by calling the `fit()` method
```python
history = model.fit(data=data)
```
<p float="left">
<img src="/docs/source/imgs/mlp_loss.png" width="500" />
</p>
After training, we can make predictions from it on test/training data
```python
prediction = model.predict_on_test_data(data=data)
```
<p float="left">
<img src="/docs/source/imgs/mlp_reg.png" width="400" />
<img src="/docs/source/imgs/mlp_residue.png" width="400" />
</p>
<p float="left">
<img src="/docs/source/imgs/mlp_line.png" width="400" />
<img src="/docs/source/imgs/mlp_edf.png" width="400" />
</p>
The model object returned from initiating AI4Water's `Model` is same as that of Keras' `Model`
We can verify it by checking its type
```python
import tensorflow as tf
isinstance(model, tf.keras.Model) # True
```
## Using your own pre-processed data
You can use your own pre-processed data without using any of pre-processing tools of AI4Water. You will need to provide
input output paris to `data` argument to `fit` and/or `predict` methods.
```python
import numpy as np
from ai4water import Model # import any of the above model
from ai4water.models import LSTM
batch_size = 16
lookback = 15
inputs = ['dummy1', 'dummy2', 'dummy3', 'dummy4', 'dummy5'] # just dummy names for plotting and saving results.
outputs=['DummyTarget']
model = Model(
model = LSTM(units=64),
batch_size=batch_size,
ts_args={'lookback':lookback},
input_features=inputs,
output_features=outputs,
lr=0.001
)
x = np.random.random((batch_size*10, lookback, len(inputs)))
y = np.random.random((batch_size*10, len(outputs)))
model.fit(x=x,y=y)
```
## using for `scikit-learn`/`xgboost`/`lgbm`/`catboost` based models
The repository can also be used for machine learning based models such as scikit-learn/xgboost based models for both
classification and regression problems by making use of `model` keyword arguments in `Model` function.
However, integration of ML based models is not complete yet.
```python
from ai4water import Model
from ai4water.datasets import busan_beach
data = busan_beach() # path for data file
model = Model(
# columns in data to be used as input
input_features=['tide_cm', 'wat_temp_c', 'sal_psu', 'rel_hum', 'pcp_mm'],
output_features = ['tetx_coppml'], # columns in data file to be used as input
seed=1872,
val_fraction=0.0,
split_random=True,
# any regressor from https://scikit-learn.org/stable/modules/classes.html
model={"RandomForestRegressor": {}}, # set any of regressor's parameters. e.g. for RandomForestRegressor above used,
# some of the paramters are https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor
)
history = model.fit(data=data)
model.predict_on_test_data(data=data)
```
# Hyperparameter optimization
For hyperparameter optimization, replace the actual values of hyperparameters
with the space.
```python
from ai4water.functional import Model
from ai4water.datasets import MtropicsLaos
from ai4water.hyperopt import Real, Integer
data = MtropicsLaos().make_regression(lookback_steps=1)
model = Model(
model = {"RandomForestRegressor": {
"n_estimators": Integer(low=5, high=30, name='n_estimators', num_samples=10),
"max_leaf_nodes": Integer(low=2, high=30, prior='log', name='max_leaf_nodes', num_samples=10),
"min_weight_fraction_leaf": Real(low=0.0, high=0.5, name='min_weight_fraction_leaf', num_samples=10),
"max_depth": Integer(low=2, high=10, name='max_depth', num_samples=10),
"min_samples_split": Integer(low=2, high=10, name='min_samples_split', num_samples=10),
"min_samples_leaf": Integer(low=1, high=5, name='min_samples_leaf', num_samples=10),
}},
input_features=data.columns.tolist()[0:-1],
output_features=data.columns.tolist()[-1:],
cross_validator = {"KFold": {"n_splits": 5}},
x_transformation="zscore",
y_transformation="log",
)
# First check the performance on test data with default parameters
model.fit_on_all_training_data(data=data)
print(model.evaluate_on_test_data(data=data, metrics=["r2_score", "r2"]))
# optimize the hyperparameters
optimizer = model.optimize_hyperparameters(
algorithm = "bayes", # you can choose between `random`, `grid` or `tpe`
data=data,
num_iterations=60,
)
# Now check the performance on test data with default parameters
print(model.evaluate_on_test_data(data=data, metrics=["r2_score", "r2"]))
```
Running the above code will optimize the hyperparameters and generate
following figures
<p float="left">
<img src="/docs/source/imgs/hpo_ml_convergence.png" width="400" />
<img src="/docs/source/imgs/hpo_fanova_importance_hist.png" width="400" />
</p>
<p float="left">
<img src="/docs/source/imgs/hpo_objective.png" width="500" />
<img src="/docs/source/imgs/hpo_evaluations.png" width="500" />
</p>
<p float="left">
<img src="/docs/source/imgs/hpo_parallel_coordinates.png" width="500" />
</p>
# Experiments
The experiments module is for comparison of multiple models on a single data
or for comparison of one model under different conditions.
```python
from ai4water.datasets import busan_beach
from ai4water.experiments import MLRegressionExperiments
data = busan_beach()
comparisons = MLRegressionExperiments(
input_features=data.columns.tolist()[0:-1],
output_features=data.columns.tolist()[-1:],
split_random=True
)
# train all the available machine learning models
comparisons.fit(data=data)
# Compare R2 of models
best_models = comparisons.compare_errors(
'r2',
data=data,
cutoff_type='greater',
cutoff_val=0.1,
figsize=(8, 9),
colors=['salmon', 'cadetblue']
)
# Compare model performance using Taylor diagram
_ = comparisons.taylor_plot(
data=data,
figsize=(5, 9),
exclude=["DummyRegressor", "XGBRFRegressor",
"SGDRegressor", "KernelRidge", "PoissonRegressor"],
leg_kws={'facecolor': 'white',
'edgecolor': 'black','bbox_to_anchor':(2.0, 0.9),
'fontsize': 10, 'labelspacing': 1.0, 'ncol': 2
},
)
```
<p float="left">
<img src="/docs/source/imgs/exp_r2.png" width="500" />
<img src="/docs/source/imgs/exp_taylor.png" width="500" />
</p>
For more comprehensive and detailed examples see [](https://ai4water.readthedocs.io/projects/Examples/en/latest/?badge=latest)
## Disclaimer
The library is still under development. Fundamental changes are expected without prior notice or
without regard of backward compatability.
#### Related
[sktime: A Unified Interface for Machine Learning with Time Series](https://github.com/alan-turing-institute/sktime)
[Seglearn: A Python Package for Learning Sequences and Time Series](https://github.com/dmbee/seglearn)
[Pastas: Open Source Software for the Analysis of Groundwater Time Series](https://github.com/pastas/pastas)
[Time Series FeatuRe Extraction on basis of Scalable Hypothesis tests (tsfresh -- A Python package)](https://github.com/blue-yonder/tsfresh)
[MLAir](https://gmd.copernicus.org/preprints/gmd-2020-332/)
[pyts: A Python Package for Time Series Classification](https://github.com/johannfaouzi/pyts)
[Tslearn, A Machine Learning Toolkit for Time Series Data](https://github.com/tslearn-team/tslearn)
[TSFEL: Time Series Feature Extraction Library](https://doi.org/10.1016/j.softx.2020.100456)
[catch22](https://github.com/chlubba/catch22)
[vest](https://github.com/vcerqueira/vest-python)
[pyunicorn (Unified Complex Network and RecurreNce analysis toolbox](https://github.com/pik-copan/pyunicorn)
[TSFuse Python package for automatically constructing features from multi-view time series data](https://github.com/arnedb/tsfuse)
[Catalyst](https://github.com/catalyst-team/catalyst)
[tsai - A state-of-the-art deep learning library for time series and sequential data](https://github.com/timeseriesAI/tsai)
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/README.md | README.md |
utility functions
*****************
Some utility functions
prepare_data
============
.. automodule:: ai4water.utils.utils
:members: prepare_data
get_attributes
==============
.. automodule:: ai4water.backend
:members: get_attributes
murphy_diagram
==============
.. automodule:: ai4water.utils.visualizations
:members: murphy_diagram
fdc_plot
==============
.. automodule:: ai4water.utils.visualizations
:members: fdc_plot
edf_plot
========
.. automodule:: ai4water.utils.visualizations
:members: edf_plot
jsonize
=======
.. automodule:: ai4water.utils.utils
:members: jsonize
TrainTestSplit
===============
.. automodule:: ai4water.utils.utils
:members: TrainTestSplit | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/utils.rst | utils.rst |
Experiments
***********
The purpose of this module is to compare more than one models. Furthermore,
this module can also optimize the hyper-parameters of these models and compare
them. The Experiments class provides the basic building block for conducting
experiments. The MLRegressionExperiments and MLClassificationExperiments compare
several classical machine learning regression and classification models respectively.
The DLRegressionExperiments class compares some common basic deep learning algorithms
for a given data.
Experiments
===========
.. autoclass:: ai4water.experiments.Experiments
:members:
:show-inheritance:
.. automethod:: __init__
RegressionExperiments
=====================
.. autoclass:: ai4water.experiments.MLRegressionExperiments
:members:
:show-inheritance:
.. automethod:: __init__
.. autoclass:: ai4water.experiments.DLRegressionExperiments
:members:
:show-inheritance:
.. automethod:: __init__
ClassificationExperiments
=========================
.. autoclass:: ai4water.experiments.MLClassificationExperiments
:members:
:show-inheritance:
.. automethod:: __init__
DLRegressionExperiments
=========================
.. autoclass:: ai4water.experiments.DLRegressionExperiments
:members:
:show-inheritance:
.. automethod:: __init__,
input_shape,
model_MLP,
model_LSTM,
model_CNN,
model_CNNLSTM,
model_LSTMAutoEncoder,
model_TCN,
model_TemporalFusionTransformer,
DLClassificationExperiments
============================
.. autoclass:: ai4water.experiments.DLClassificationExperiments
:members:
:show-inheritance:
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/experiments.rst | experiments.rst |
Model
******
BaseModel
=========
The core of `AI4Water` is the `Model` class which builds and trains the machine learning model.
This class interacts with pre-processing and post-processing modules.
The `Model` class uses a python dictionary to build layers of neural networks.
To build Tensorflow based models using python dictionary see the guide
for :doc:`declarative_def_tf`. To build pytorch based NN models using python dictionary see the guide
for :doc:`declarative_def_torch` .
.. autoclass:: ai4water._main.BaseModel
:members:
__init__,
training_data,
validation_data,
test_data,
all_data,
fit,
fit_on_all_training_data,
evaluate,
evaluate_on_training_data,
evaluate_on_validation_data,
evaluate_on_test_data,
evaluate_on_all_data,
predict,
predict_on_training_data,
predict_on_validation_data,
predict_on_test_data,
predict_on_all_data,
predict_proba,
predict_log_proba,
interpret,
view,
eda,
score,
from_config,
from_config_file,
update_weights,
activations,
cross_val_score,
explain,
explain_example,
shap_values,
prediction_analysis,
partial_dependence_plot,
optimize_transformations,
optimize_hyperparameters,
permutation_importance,
sensitivity_analysis,
seed_everything
Model subclassing
======================
Model subclassing is different from functional API in the way the model (neural network)
is constructed. To understand the difference between model-subclassing API and functional
API see :ref:`sub_vs_func`
.. automodule:: ai4water.main.Model
:members:
__init__,
initialize_layers,
build_from_config,
forward,
fit_pytorch,
Model for functional API
============================
.. autoclass:: ai4water.functional.Model
:members:
__init__,
add_layers,
compile,
build,
Pytorch Learner
===============
This module can be used to train models which are built outside `AI4Water`'s model class.
Thus, this module does not do any pre-processing, model building and post-processing of results.
This module is inspired from fastai's Learner_ and keras's Model_ class.
.. autoclass:: ai4water.models._torch.Learner
:undoc-members:
:show-inheritance:
:members:
__init__,
fit,
evaluate,
predict,
update_metrics,
update_weights,
plot_model,
.. _Learner:
https://docs.fast.ai/learner.html#Learner
.. _Model:
https://www.tensorflow.org/api_docs/python/tf/keras/Model | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/model.rst | model.rst |
.. _dec_def_tf:
declarative model definition for tensorflow
*******************************************
We can construct a normal layered model using keras layers by placing the layers in a dictionary. The keys in the
dictionary must be a keras layer and optionally can have an identifier separated by an underscore `_` in order to
differentiate it from other similar layers in the model. For example `Dense_0` and `Dense_1` are two
Dense_ layers.
The input/initializing arguments in the layer must be
enclosed in a dictionary within the layer. To find out what input/initializing arguments can be used, check
documentation of corresponding layer in Tensorflow_ docs.
It should be noted that the layer name is case-sensitive. Therefore, Dense layer cannot be DENSE.
multi-layer perceptron
======================
.. code-block:: python
>>> from ai4water import Model
>>> from ai4water.datasets import arg_beach
>>> import pandas as pd
...
>>> layers = {"Dense_0": {'units': 64, 'activation': 'relu'},
... "Dropout_0": 0.3, # 0.3 here refers to 'rate' keyword argument in Dropout layer in Tensorflow API
... "Dense_1": {'units': 32, 'activation': 'relu'},
... "Dropout_1": 0.3,
... "Dense_2": {'units': 16, 'activation': 'relu'},
... "Dense_3": 1 # 1 refers to 'units' keyword argument in Dense layer in Tensorflow
... }
...
>>> df = arg_beach()
...
>>> model = Model(
... input_features=df.columns.tolist()[0:-1],
... output_features=df.columns.tolist()[-1:],
... model={'layers':layers},
... )
.. image:: imgs/mlp.png
:align: center
:height: 500
LSTM based model
=================
In following case a Dense_ layer with one `units` is added automatically at the end of
second `LSTM` layer.
.. code-block:: python
>>> import pandas as pd
...
>>> from ai4water import Model
>>> from ai4water.datasets import arg_beach
...
>>> layers = {"LSTM_0": {'units': 64, 'return_sequences': True},
... "LSTM_1": 32,
... "Dense": 1
... }
...
>>> df = arg_beach
...
>>> model = Model(ts_args={"lookback": 12}
... input_features=df.columns.tolist()[0:-1],
... output_features=df.columns.tolist()[-1:],
... model={'layers':layers})
.. image:: imgs/lstm.png
:align: center
:height: 500
1d CNN based model
====================
If a layer does not receive any input arguments for its initialization, still an empty dictionary must be provided.
Activation functions can also be used as a separate layer.
.. code-block:: python
>>> layers = {"Conv1D_9": {'filters': 64, 'kernel_size': 2},
... "Dropout": 0.3,
... "Conv1D_1": {'filters': 32, 'kernel_size': 2},
... "MaxPool1D": 2,
... 'Flatten': {}, # This layer does not receive any input arguments
... 'LeakyReLU': {}, # activation function can also be used as a separate layer
... "Dense": 1
... }
.. image:: imgs/cnn.png
:align: center
:height: 600
LSTM -> CNN based model
========================
.. code-block:: python
>>> layers = {"LSTM": {'units': 64, 'return_sequences': True},
... "Conv1D_0": {'filters': 64, 'kernel_size': 2},
... "Dropout": 0.3,
... "Conv1D_1": {'filters': 32, 'kernel_size': 2},
... "MaxPool1D": 2,
... 'Flatten': {},
... 'LeakyReLU': {},
... "Dense": 1
... }
.. image:: imgs/lstm_cnn.png
:align: center
:height: 700
ConvLSTM based model
=====================
AI4Water will infer input shape for general cases however it is better to explicitly define the Input_ layer
when the input is > 3d or the number of inputs are more than one.
.. code-block:: python
>>> layers = {'Input': {'shape': (3, 1, 4, 8)},
... 'ConvLSTM2D': {'filters': 64, 'kernel_size': (1, 3), 'activation': 'relu'},
... 'Flatten': {},
... 'RepeatVector': 1,
... 'LSTM': {'units': 128, 'activation': 'relu', 'dropout': 0.3, 'recurrent_dropout': 0.4 },
... 'Dense': 1
... }
.. image:: imgs/convlstm.png
:align: center
:height: 500
CNN -> LSTM
==========================
If a layer is to be enclosed in `TimeDistributed` layer, just add the layer followed
by `TimeDistributed` as shown below. In following, 3 `Conv1D` layers are enclosed
in `TimeDistributed` layer. Similarly `Flatten` and `MaxPool1D` are also
wrapped in `TimeDistributed` layer.
.. code-block:: python
>>> sub_sequences = 3
>>> lookback = 15
>>> time_steps = lookback // sub_sequences
>>> layers = {
... "Input": {'config': {'shape': (None, time_steps, 10)}},
... "TimeDistributed_0": {},
... 'Conv1D_0': {'filters': 64, 'kernel_size': 2},
... 'LeakyReLU_0': {},
... "TimeDistributed_1":{},
... 'Conv1D_1': {'filters': 32, 'kernel_size': 2},
... 'ELU_1': {},
... "TimeDistributed_2": {},
... 'Conv1D_2': {'filters': 16, 'kernel_size': 2},
... 'tanh_2': {},
... "TimeDistributed_3": {},
... "MaxPool1D": {'pool_size': 2},
... "TimeDistributed_4": {},
... 'Flatten': {},
... 'LSTM_0': {'units': 64, 'activation': 'relu', 'dropout': 0.4, 'recurrent_dropout': 0.5,
... 'return_sequences': True, 'name': 'lstm_0'},
... 'relu_1': {},
... 'LSTM_1': {'units': 32, 'activation': 'relu', 'dropout': 0.4,
... 'recurrent_dropout': 0.5, 'name': 'lstm_1'},
... 'sigmoid_2': {},
... 'Dense': 1
>>> }
.. image:: imgs/cnn_lstm.png
:height: 1200
LSTM based auto-encoder
========================
.. code-block:: python
>>> layers = {
... 'LSTM_0': {'units': 100, 'dropout': 0.3, 'recurrent_dropout': 0.4},
... "LeakyReLU_0": {},
... 'RepeatVector': 11,
... 'LSTM_1': {'units': 100, 'dropout': 0.3, 'recurrent_dropout': 0.4},
... "relu_1": {},
... 'Dense': 1
>>> }
.. image:: imgs/lstm_autoenc.png
:align: center
:height: 500
TCN layer
=========
You can use third party layers such as `tcn`_ which is currently not supported by
Tensorflow. Provided you have installed `tcn`, the layer along with its arguments
can be used as following
.. code-block:: python
>>> layers = {"TCN": {'nb_filters': 64,
... 'kernel_size': 2,
... 'nb_stacks': 1,
... 'dilations': [1, 2, 4, 8, 16, 32],
... 'padding': 'causal',
... 'use_skip_connections': True,
... 'return_sequences': False,
... 'dropout_rate': 0.0},
... 'Dense': 1
>>> }
.. image:: imgs/tcn.png
:align: center
:height: 400
Multiple Inputs
===============
In order to build more complex models, where a layer takes more than one inputs,
you can specify the `inputs` key for the layer and specify which inputs the
layer uses. The `value` of the `inputs` dictionary must be a `list` in this
case whose members must be the names of the layers which must have been defined
earlier. The input/initializing arguments in the layer must be enclosed in a
`config` dictionary within the layer in such cases.
.. code-block:: python
>>> from ai4water import Model
>>> class MyModel(Model):
...
>>> def training_data(self, **kwargs) -> (list, list):
... """ write code which returns x and y where x consists of [(samples, 5, 10), (samples, 10)] and y consists of
... list [(samples, 1)]
... """
>>> return
...
>>> def test_data(self, **kwargs):
>>> return
>>> layers = {"Input_0": {"shape": (5, 10), "name": "cont_inputs"},
... "LSTM_0": {"config": { "units": 62, "activation": "leakyrelu", "dropout": 0.4,
... "recurrent_dropout": 0.4, "return_sequences": False, "name": "lstm_0"},
... "inputs": "cont_inputs"},
... "Input_1": {"shape": 10, "name": "disc_inputs"},
... "Dense_0": {"config": {"units": 64,"activation": "leakyrelu", "name": "Dense_0"},
... "inputs": "disc_inputs"},
... "Flatten_0": {"config": {"name": "flatten_0" },
... "inputs": "Dense_0"},
...
... "Concatenate": {"config": {"name": "Concat" },
... "inputs": ["lstm_0", "flatten_0"]},
...
... "Dense_1": {"units": 16, "activation": "leakyrelu", "name": "Dense_1"},
... "Dropout": 0.4,
... "Dense_2": 1
>>> }
As the above model takes two inputs, we will have to overwrite `training_data`,
`validation_data` and `test_data` methods in our own class which should inherit
from `Model` class
.. image:: imgs/lstm_dense.png
Multiple Output Layers
=======================
In some cases a layer returns more than one output and we want to use each of
those outputs in a separate layer. Such models can be built by specifying the
outputs from a layer using `outputs` key. The `value` of the `outputs` key can a
string or a list of strings specifying the names of of outputs, the layer is
returning. We can use these names as inputs to any other layer later in the model.
.. code-block:: python
>>> layers = {
... "LSTM": {'config': {'units': 64, 'return_sequences': True, 'return_state': True},
... 'outputs': ['junk', 'h_state', 'c_state']},
... "Dense_0": {'config': {'units': 1, 'name': 'MyDense'},
... 'inputs': 'h_state'},
... "Conv1D_1": {'config': {'filters': 64, 'kernel_size': 3, 'name': 'myconv'},
... 'inputs': 'junk'},
... "MaxPool1D": {'config': {'name': 'MyMaxPool'},
... 'inputs': 'myconv'},
... "Flatten": {'config': {'name': 'MyFlatten'},
... 'inputs': 'MyMaxPool'},
... "Concatenate": {'config': {'name': 'MyConcat'},
... 'inputs': ['MyDense', 'MyFlatten']},
...
... "Dense": 1
>>> }
.. image:: imgs/multi_output_layer.png
Additional call args
==========================
We might be tempted to provide additional call arguments to a layer. For example,
in tensorflow's LSTM_ layer, we can provide `initial state` of an LSTM. Suppose
we want to use hidden and cell state of one LSTM as initial state for next
LSTM. In such cases we can make use of `call_args` as `key`. The value of
`call_args` must a dictionary. In this way we can provide `keyword` arguments
while calling a layer.
.. code-block:: python
>>> layers ={
... "Input": {'config': {'shape': (15, 8), 'name': "MyInputs"}},
... "LSTM": {'config': {'units': 64, 'return_sequences': True, 'return_state': True, 'name': 'MyLSTM1'},
... 'inputs': 'MyInputs',
... 'outputs': ['junk', 'h_state', 'c_state']},
... "Dense_0": {'config': {'units': 1, 'name': 'MyDense'},
... 'inputs': 'h_state'},
... "Conv1D_1": {'config': {'filters': 64, 'kernel_size': 3, 'name': 'myconv'},
... 'inputs': 'junk'},
... "MaxPool1D": {'config': {'name': 'MyMaxPool'},
... 'inputs': 'myconv'},
... "Flatten": {'config': {'name': 'MyFlatten'},
... 'inputs': 'MyMaxPool'},
...
... "LSTM_3": {"config": {'units': 64, 'name': 'MyLSTM2'},
... 'inputs': 'MyInputs',
... 'call_args': {'initial_state': ['h_state', 'c_state']}},
... "Concatenate": {'config': {'name': 'MyConcat'},
... 'inputs': ['MyDense', 'MyFlatten', 'MyLSTM2']},
... "Dense": 1
>>> }
.. image:: imgs/add_call_args.png
It must be noted that the keys `inputs`, `outputs`, and `call_args` are optional while `config` is mandatory.
lambda layers
==========================
You can also add `lambda`_ layers by placing the
lambda layer definition in the `config` as following:
.. code-block:: python
>>> import tensorflow as tf
>>> from ai4water import Model
>>> import pandas as pd
>>> layers = {
... "LSTM_0": {"config": {"units": 32, "return_sequences": True}},
... "lambda": {"config": tf.keras.layers.Lambda(lambda x: x[:, -1, :])},
... "Dense": {"config": {"units": 1}}
>>> }
... # The model can be seamlessly loaded from the saved json file using
>>> config_path = "path like"
>>> model = Model.from_config(config_path=config_path)
.. image:: imgs/lambda.png
Custom Layers
==============
You can also use your own custom layers which inherit from tensorflow.keras.layers.Layer.
All you need to do is to register your layer in ai4water.tf_attributes.LAYERS as shown
in the example below
.. code-block:: python
>>> import numpy as np
>>> from tensorflow.keras.layers import Dense
>>> # Define a custom Dense layer
>>> class MyDense(Dense):
pass
>>> # register your custom layer
>>> import ai4water.tf_attributes as attributes
>>> attributes.LAYERS['CustomDense'] = MyDense
# import Model from ai4water
>>> from ai4water import Model
# build Model using your custom layer
>>> layers = {"Input": {"shape": (10,)},
"CustomDense": 1}
>>> model = Model(model={"layers": layers})
>>> inp = np.random.random((100, 10))
>>> y = np.random.random(100)
>>> h = model.fit(x=inp, y=y, epochs=1)
For more examples see `examples`.
Activation layers
==================
Following activation layers can be used.
=============== ================
Activation Name in ai4water
=============== ================
relu relu
LeakyReLU LeakyReLU
PReLU PReLU
ThresholdedReLU ThresholdedReLU
ELU ELU
tanh tanh
relu relu
selu selu
sigmoid sigmoid
hardsigmoid hardsigmoid
crelu crelu
relu6 relu6
softmax softmax
softplus softplus
softsign softsign
swish swish
=============== ================
.. _Dense:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
.. _Tensorflow:
https://www.tensorflow.org/api_docs/python/tf/keras/layers
.. _Input:
https://www.tensorflow.org/api_docs/python/tf/keras/Input
.. _tcn:
https://github.com/philipperemy/keras-tcn
.. _lambda:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Lambda
.. _LSTM:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM#call_arguments_2 | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/declarative_def_tf.rst | declarative_def_tf.rst |
.. ai4water documentation
Welcome to ai4water's documentation!
====================================
.. image:: imgs/monogram.png
.. toctree::
:maxdepth: 2
installation
quick_start
understanding
.. toctree::
:maxdepth: 2
:caption: Modules
model
models
datasets
preprocessing
postprocessing
eda
hpo
experiments
evapotranspiration
utils
auto_examples/index
paper
=====
AI4Water v1.0: An open source python package for modeling hydrological time series using data-driven methods
https://doi.org/10.5194/gmd-15-3021-2022
If you use ai4water in your research, consider citing it using following
BibTeX entry
::
@article{atr2022GMD,
title={AI4Water v1.0: An open source python package for modeling hydrological time series using data-driven methods},
author={Abbas, Ather and Boithias, Laurie and Pachepsky, Yakov and Kim, Kyunghyun and Chun, Jong Ahn and Cho, Kyung Hwa},
journal={Geoscientific Model Development},
VOLUME = {15},
YEAR = {2022},
NUMBER = {7},
PAGES = {3021--3039},
URL = {https://gmd.copernicus.org/articles/15/3021/2022/},
DOI = {10.5194/gmd-15-3021-2022}
publisher={Copernicus}
}
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/index.rst | index.rst |
preprocessing
*************
.. toctree::
:maxdepth: 2
preprocessing/dataset
preprocessing/imputation
preprocessing/featurization
preprocessing/make_hrus
preprocessing/transformation
The preprocessing sub-module contains classes which handles preparation of input data.
The fundamental class is the `DataSet` class which prepares data from a single data
source. If you hvae multiple data sources then you can either use `DataSetUnion`
or `DataSetPipeline` class. The DataSet can take a data in a variety of commonly
found formats such as csv, xlsx and prepares the data so that it can be fed to
`Model` for training. This class works with modules in conjunction with `Imputation` class.
It should be noted that transformations applied in ai4water are part of **Model**.
This means transformations are applied, everytime a call to the model is made using
`fit`, `predict`, `evaluate`, `score` or `predict_prob` methods.
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/preprocessing.rst | preprocessing.rst |
Installation
*************
using pip
=========
The most easy way to install ai4water is using ``pip``
::
pip install ai4water
However, if you are interested in using only specific module of ai4water, you can
choose to install dependencies related to that module only. For example
to use only machine learning based models use can use ``ml`` option as Following
::
pip install ai4water[ml]
For list of all options see :ref:`installation_options`.
using github link
=================
You can also use github link to install ai4water.
::
python -m pip install git+https://github.com/AtrCheema/AI4Water.git
The latest code however (possibly with less bugs and more features) can be installed from ``dev`` branch instead
::
python -m pip install git+https://github.com/AtrCheema/AI4Water.git@dev
To install the latest branch (`dev`) with all requirements use ``all`` keyword
::
python -m pip install "AI4Water[all] @ git+https://github.com/AtrCheema/AI4Water.git@dev"
using setup.py file
===================
go to folder where repository is downloaded
::
python setup.py install
.. _installation_options:
installation options
=====================
The ``all`` option will install all the dependencies. You can choose the dependencies
of particular sub-module by using the specific keyword. Following keywords are available
- ``hpo`` if you want hyperparameter optimization
- ``post_process`` if you want postprocessing
- ``exp`` for experiments sub-module
- ``eda`` for exploratory data analysis sub-module
- ``ml`` for classical machine learning models
- ``tf`` for using tensorflow
- ``torch`` for using pytorch
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/installation.rst | installation.rst |
postprocessing
**************
This consists of modules which handles the output of `Model` after
the model has been trained i.e. after `.fit` method has been called on it.
Please note that the `SeqMetrics` sub-module has been deprecated.
Please use `SeqMetrics <https://seqmetrics.readthedocs.io/en/latest/>`_ library instead.
.. toctree::
:maxdepth: 2
postprocessing/explain
postprocessing/interpret
postprocessing/seqmetrics
postprocessing/visualize
ProcessPredictions
==================
.. autoclass:: ai4water.postprocessing.ProcessPredictions
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
.. automethod:: __call__ | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/postprocessing.rst | postprocessing.rst |
.. _quick_start:
quick start
***********
Build a `Model` by providing all the arguments to initiate it.
For building deep learning models, we can use higher level functions such as :py:class:`ai4water.models.LSTM`.
.. code-block:: python
>>> from ai4water import Model
>>> from ai4water.models import LSTM
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(
... model = LSTM(64),
... input_features=['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm'], # columns in csv file to be used as input
... output_features = ['tetx_coppml'], # columns in csv file to be used as output
... ts_args={'lookback': 12} # how much historical data we want to feed to model
>>> )
Train the model by calling the `fit()` method
.. code-block:: python
>>> history = model.fit(data=data)
Make predictions from it
.. code-block:: python
>>> predicted = model.predict()
The model object returned from initiating AI4Water's `Model` is same as that of Keras' `Model`
We can verify it by checking its type
.. code-block:: python
>>> import tensorflow as tf
>>> isinstance(model, tf.keras.Model) # True
Defining layers of neural networks
==================================
Above we had used LSTM model. Other available deep learning models are MLP (:py:class:`ai4water.models.MLP`),
CNN (:py:class:`ai4water.models.CNN`) CNNLSTM (:py:class:`ai4water.models.CNNLSTM`),
TCN (:py:class:`ai4water.models.TCN`) and TFT (:py:class:`ai4water.models.TFT`). On the other hand
if we wish to define the layers of neural networks ourselves, we can also do so using :ref:`dec_def_tf`
.. code-block:: python
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(
... model = {'layers': {"LSTM": 64,
... 'Dense': 1}},
... input_features=['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm'],
... output_features = ['tetx_coppml'],
... ts_args={'lookback': 12}
>>> )
Using your own pre-processed data
=================================
You can use your own pre-processed data without using any of pre-processing tools of AI4Water. You will need to provide
input output paris to `data` argument to `fit` and/or `predict` methods.
.. code-block:: python
>>> import numpy as np
>>> from ai4water import Model # import any of the above model
...
>>> batch_size = 16
>>> lookback = 15
>>> inputs = ['dummy1', 'dummy2', 'dummy3', 'dummy4', 'dummy5'] # just dummy names for plotting and saving results.
>>> outputs=['DummyTarget']
...
>>> model = Model(
... model = {'layers': {"LSTM": 64,
... 'Dense': 1}},
... batch_size=batch_size,
... ts_args={'lookback':lookback},
... input_features=inputs,
... output_features=outputs,
... lr=0.001
... )
>>> x = np.random.random((batch_size*10, lookback, len(inputs)))
>>> y = np.random.random((batch_size*10, len(outputs)))
...
>>> history = model.fit(x=x,y=y)
using `scikit-learn`/`xgboost`/`lgbm`/`catboost` based models
=================================================================
The repository can also be used for machine learning based models such as scikit-learn/xgboost based models for both
classification and regression problems by making use of `model` keyword arguments in `Model` function.
However, integration of ML based models is not complete yet.
.. code-block:: python
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
...
>>> data = busan_beach() # path for data file
...
>>> model = Model(
... input_features=['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm'], # columns in csv file to be used as input
... output_features = ['tetx_coppml'],
... val_fraction=0.0,
... # any regressor from https://scikit-learn.org/stable/modules/classes.html
... model={"RandomForestRegressor": {"n_estimators":1000}}, # set any of regressor's parameters. e.g. for RandomForestRegressor above used,
... # some of the parameters are https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor
... )
...
>>> history = model.fit(data=data)
...
>>> preds = model.predict()
Using your own (custom) model
=============================
If you don't want to use sklearn/xgboost/catboost/lgbm's Models and you
have your own model. You can use this model seamlessly as far as this
model has .fit, .evaluate and .predict methods.
.. code-block:: python
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from sklearn.ensemble import RandomForestRegressor
>>> class MyRF(RandomForestRegressor):
>>> pass # your own customized random forest model
>>> data = busan_beach()
>>> model = Model(model=MyRF, mode="regression")
>>> model.fit(data=data)
you can initialize your Model with arguments as well
>>> model = Model(model={MyRF: {"n_estimators": 10}},
>>> mode="regression")
>>> model.fit(data=data)
Hyperparameter optimization
===========================
For hyperparameter optimization, replace the actual values of hyperparameters
with the space.
.. code-block:: python
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.hyperopt import Integer, Real
>>> data = busan_beach()
>>> model = Model(
... model = {'layers': {"LSTM": Integer(low=30, high=100,name="units"),
... 'Dense': 1}},
... input_features=['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm'], # columns in csv file to be used as input
... output_features = ['tetx_coppml'], # columns in csv file to be used as output
... ts_args={'lookback': Integer(low=5, high=15, name="lookback")},
... lr=Real(low=0.00001, high=0.001, name="lr")
>>> )
>>> model.optimize_hyperparameters(data=data,
... algorithm="bayes", # choose between 'random', 'grid' or 'atpe'
... num_iterations=30
... )
Experiments
===========
The experiments module can be used to compare a large range of regression
and classification algorithms. For example, to compare performance of
regression algorithms on your data
.. code-block:: python
>>> from ai4water.datasets import busan_beach
>>> from ai4water.experiments import MLRegressionExperiments
# first compare the performance of all available models without optimizing their parameters
>>> data = busan_beach() # read data file, in this case load the default data
>>> inputs = list(data.columns)[0:-1] # define input and output columns in data
>>> outputs = list(data.columns)[-1]
>>> comparisons = MLRegressionExperiments(
>>> input_features=inputs, output_features=outputs)
>>> comparisons.fit(data=data,run_type="dry_run")
>>> comparisons.compare_errors('r2')
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/quick_start.rst | quick_start.rst |
declarative model definition for pytorch
****************************************
This page describes how to build Neural Networks for pytorch using python dictionary in `ai4water`.
The user can use any layer provided by pytorch such as `Linear` or `LSTM`. Similarly the user
can use any input argument allowed by the particular layer e.g. `bidirectional` for
LSTM_ and `out_features` for
Linear_.
All the examples presented here are similar which were shown for tensorflow's case :doc:`declarative_def_tf`
multi-layer perceptron
======================
.. code-block:: python
from ai4water.datasets import arg_beach
from ai4water import Model
data=arg_beach()
layers = {
"Linear_0": {"in_features": 13, "out_features": 64},
"ReLU_0": {},
"Dropout_0": 0.3,
"Linear_1": {"in_features": 64, "out_features": 32},
"ReLU_1": {},
"Dropout_1": 0.3,
"Linear_2": {"in_features": 32, "out_features": 16},
"Linear_3": {"in_features": 16, "out_features": 1},
}
model = Model(
model={'layers': layers},
input_features=data.columns.tolist()[0:-1],
output_features=data.columns.tolist()[-1:],
)
If we want to do slicing of the outputs of one layer, we can use python's `lambda` function.
In fact any `callable` object can be provided
LSTM based model
=================
.. code-block:: python
layers ={
'LSTM_0': {"config": {'input_size': 13, 'hidden_size': 64, "batch_first": True},
"outputs": ['lstm0_output', 'states_0']}, # LSTM in pytorch returns two values see docs
'LSTM_1': {"config": {'input_size': 64, 'hidden_size': 32, "batch_first": True, "dropout": 0.3},
"outputs": ["lstm1_output", 'states_1'],
"inputs": "lstm0_output"},
'slice': {"config": lambda x: x[:, -1, :], # we want to get the output from last lookback step.
"inputs": "lstm1_output"},
"Linear": {"in_features": 32, "out_features": 1},
}
.. _LSTM:
https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html
.. _Linear:
https://pytorch.org/docs/stable/generated/torch.nn.Linear.html | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/declarative_def_torch.rst | declarative_def_torch.rst |
HyperParameter Optimization
***************************
This module is for optimization of hyper-parameters. The `HyperOpt` class performs
optimization by minimizing the objective which is defined by a user defined
objective function. The space of hyperparameters can be defined by
using `Categorical`, `Integer` and `Real` classes.
For tutorial on using this class, see `tutorials`_
Categorical
===========
.. autoclass:: ai4water.hyperopt.Categorical
:members:
.. automethod:: __init__
Real
====
.. autoclass:: ai4water.hyperopt.Real
:members:
.. automethod:: __init__
Integer
=======
.. autoclass:: ai4water.hyperopt.Integer
:members:
.. automethod:: __init__
HyperOpt
=========
.. autoclass:: ai4water.hyperopt.HyperOpt
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
.. automethod:: __getattr__
.. _tutorials:
https://ai4water.readthedocs.io/projects/Examples/en/dev/_notebooks/main.html
fANOVA
=======
.. autoclass:: ai4water.hyperopt.fANOVA
:members:
.. automethod:: __init__ | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/hpo.rst | hpo.rst |
explain
*******
ShapExplainer
=============
.. autoclass:: ai4water.postprocessing.explain.ShapExplainer
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
LimeMLExplainer
===============
.. autoclass:: ai4water.postprocessing.explain.LimeExplainer
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
PermutationImportance
=====================
.. autoclass:: ai4water.postprocessing.explain.PermutationImportance
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
PartialDependencePlot
=====================
.. autoclass:: ai4water.postprocessing.explain.PartialDependencePlot
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
explain_model
=============
.. automodule:: ai4water.postprocessing.explain.explain_model
:show-inheritance:
explain_model_with_lime
==========================
.. automodule:: ai4water.postprocessing.explain.explain_model_with_lime
:show-inheritance:
explain_model_with_shap
=======================
.. automodule:: ai4water.postprocessing.explain.explain_model_with_shap
:show-inheritance:
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/postprocessing/explain.rst | explain.rst |
Performance Metrics
*******************
Please note that the `SeqMetrics` sub-module has been deprecated.
Please use `SeqMetrics <https://seqmetrics.readthedocs.io/en/latest/>`_ library instead.
SeqMetrics
=========
.. autoclass:: ai4water.postprocessing.SeqMetrics.Metrics
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
RegressionMetrics
==================
.. autoclass:: ai4water.postprocessing.SeqMetrics.RegressionMetrics
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
ClassificationMetrics
=====================
.. autoclass:: ai4water.postprocessing.SeqMetrics.ClassificationMetrics
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
Utils
========
.. autoclass:: ai4water.postprocessing.SeqMetrics.utils.plot_metrics
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/postprocessing/seqmetrics.rst | seqmetrics.rst |
models
*******
DualAttentionModel
==================
.. autoclass:: ai4water.tf_models.DualAttentionModel
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
TemporalFusionTransformer
=========================
.. autoclass:: ai4water.models.tensorflow.TemporalFusionTransformer
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
NBeats
======
.. autoclass:: ai4water.models.tensorflow.NBeats
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
HARHNModel
==========
.. autoclass:: ai4water.pytorch_models.HARHNModel
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
IMVModel
========
.. autoclass:: ai4water.pytorch_models.IMVModel
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/models/models.rst | models.rst |
Tensorflow Layers
*****************
MCLSTM
========
.. autoclass:: ai4water.models._tensorflow.MCLSTM
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
Conditionalize
==============
.. autoclass:: ai4water.models._tensorflow.Conditionalize
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
EALSTM
========
.. autoclass:: ai4water.models._tensorflow.EALSTM
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
TransformerBlocks
==================
.. autoclass:: ai4water.models._tensorflow.TransformerBlocks
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
Transformer
==================
.. autoclass:: ai4water.models._tensorflow.Transformer
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
TabTransformer
==================
.. autoclass:: ai4water.models._tensorflow.private_layers.TabTransformer
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
FTTransformer
==================
.. autoclass:: ai4water.models._tensorflow.private_layers.FTTransformer
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/models/layers.rst | layers.rst |
Data Transformations
********************
Transformations
==================
.. autoclass:: ai4water.preprocessing.transformations.Transformation
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
.. autoclass:: ai4water.preprocessing.transformations.Transformations
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
.. autoclass:: ai4water.preprocessing.transformations.ScalerWithConfig
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
.. autoclass:: ai4water.preprocessing.transformations.PowerTransformer
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
.. autoclass:: ai4water.preprocessing.transformations.FunctionTransformer
:members:
:undoc-members:
:show-inheritance:
.. automethod:: __init__
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/docs/source/preprocessing/transformation.rst | transformation.rst |
<a href="https://colab.research.google.com/github/AtrCheema/AI4Water/blob/dev/examples/paper/customizing_loss_function.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
This notebook shows how to customize loss function when using AI4Water's `Model` class.
AI4Water's Model class has a method named `loss`. Therefore, if the user wishes to modify loss function for neural networks based models, the user has to overwrite this method of Model class.
In this problem, instead of predicted actual observation, we predict quantiles
The loss value function is customized. We use pinball loss. https://www.lokad.com/pinball-loss-function-definition
Inspired from https://www.kaggle.com/ulrich07/quantile-regression-with-keras
```
try:
import ai4water
except (ImportError, ModuleNotFoundError):
!python -m pip install "AI4Water[all] @ git+https://github.com/AtrCheema/AI4Water.git"
import tensorflow as tf
from tensorflow import keras
tf.__version__
import numpy as np
import pandas as pd
np.__version__
from ai4water import Model
class QuantileModel(Model):
def training_data(self, *args, **kwargs):
x,y = super().training_data(*args, **kwargs)
return x, y.reshape(-1,1,1)
def validation_data(self, *args, **kwargs):
x,y = super().training_data(*args, **kwargs)
return x, y.reshape(-1,1,1)
def test_data(self, *args, **kwargs):
x,y = super().training_data(*args, **kwargs)
return x, y.reshape(-1,1,1)
def loss(self):
return qloss
def qloss(y_true, y_pred):
# Pinball loss for multiple quantiles
qs = quantiles
q = tf.constant(np.array([qs]), dtype=tf.float32)
e = y_true - y_pred
v = tf.maximum(q * e, (q - 1) * e)
return keras.backend.mean(v)
# Define a dummy dataset consisting of 6 time-series.
rows = 2000
cols = 6
data = np.arange(int(rows*cols)).reshape(-1, rows).transpose()
data = pd.DataFrame(data, columns=['input_' + str(i) for i in range(cols)],
index=pd.date_range('20110101', periods=len(data), freq='H'),
dtype=float)
# Define Model
layers = {'Dense_0': {'config': {'units': 64, 'activation': 'relu'}},
'Dropout_0': {'config': {'rate': 0.3}},
'Dense_1': {'config': {'units': 32, 'activation': 'relu'}},
'Dropout_1': {'config': {'rate': 0.3}},
'Dense_2': {'config': {'units': 16, 'activation': 'relu'}},
'Dense_3': {'config': {'units': 9}},
'Reshape': {"target_shape": (9, 1)}
}
# Define Quantiles
quantiles = [0.005, 0.025, 0.165, 0.250, 0.500, 0.750, 0.835, 0.975, 0.995]
# Initiate Model
model = QuantileModel(
input_features=['input_' + str(i) for i in range(cols - 1)],
output_features=['input_' + str(cols - 1)],
#lookback=1,
model={'layers': layers},
epochs=10,
quantiles=quantiles)
# Train the model on first 0.6 % of data, while 0.2% of data will be used for validation
h = model.fit(data=data)
```
We can verify that the model is using the `qloss` function as objective function
```
model.loss.__name__
true_y, pred_y = model.predict(return_true=True)
true_y.shape
pred_y.shape
```
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/examples/paper/customizing_loss_function.ipynb | customizing_loss_function.ipynb |
<a href="https://colab.research.google.com/github/AtrCheema/AI4Water/blob/dev/examples/paper/customizing_train_step.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
This file shows a minimal example how to customize `train_step` using the Model class of AI4water
```
try:
import ai4water
except (ImportError, ModuleNotFoundError):
!python -m pip install "AI4Water[all] @ git+https://github.com/AtrCheema/AI4Water.git"
import tensorflow as tf
tf.__version__
from ai4water import Model
from ai4water.datasets import busan_beach
class CustomModel(Model):
def train_step(self, data):
print('custom train_step')
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
model = CustomModel(
model={"layers": {
"Input": {"shape": (13,)},
"Dense_1": 64,
"Dropout_1": 0.2,
"Dense_2": 32,
"Dropout_2": 0.2,
"Dense_3": 16,
"Dropout_3": 0.2,
"Dense_4": 8,
"Dense_5": 1,
}
},
#lookback=1,
lr=8.95e-5
)
h = history = model.fit(data=busan_beach())
```
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/examples/paper/customizing_train_step.ipynb | customizing_train_step.ipynb |
import os
import site # so that dl4seq directory is in path
site.addsitedir(os.path.dirname(os.path.dirname(__file__)) )
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ai4water.et import HargreavesSamani, ETBase, Penman, PriestleyTaylor
from ai4water.datasets import CAMELS_AUS
from easy_mpl import process_axis
units = {'tmin': 'Centigrade',
'tmax': 'Centigrade',
'rh_min': 'percent',
'rh_max': 'percent',
'solar_rad': 'MegaJourPerMeterSquare'}
constants = dict()
constants['cts'] = 0.0055
constants['pen_ap'] = 2.4
constants['pan_ap'] = 2.4
constants['turc_k'] = 0.013
constants['wind_f'] = 'pen48'
constants['albedo'] = 0.23
constants['a_s'] = 0.23
constants['b_s'] = 0.5
constants['abtew_k'] = 0.52
constants['ct'] = 0.025
constants['tx'] = 3
constants['pan_coeff'] = 0.71
constants['pan_over_est'] = False
constants['pan_est'] = 'pot_et'
constants['CH'] = 0.12
constants['Ca'] = 0.001013
constants['surf_res'] = 70
constants['alphaPT'] = 1.28
constants['lat_rad'] = -37.293684
constants['lat_dec_deg'] = 63.506144
constants['altitude'] = 249
constants['alphaA'] = 0.14
constants['alpha_pt'] = 1.26
dataset = CAMELS_AUS(path=r"D:\mytools\AI4Water\AI4Water\utils\datasets\data\CAMELS\CAMELS_AUS")
inputs = ['mslp_SILO',
'radiation_SILO',
'rh_tmax_SILO',
'tmin_SILO',
'tmax_SILO',
'rh_tmin_SILO',
'vp_deficit_SILO',
'vp_SILO',
'et_morton_point_SILO'
]
data = dataset.fetch(['224206'], dynamic_attributes=inputs, categories=None, st='19700101', en='20141231')
data = data['224206']
data = data.rename(columns={
'tmin_SILO': 'tmin',
'tmax_SILO': 'tmax',
'radiation_SILO': 'sol_rad',
'vapor_pressure': 'vp_SILO',
'rh_tmin_SILO': 'rh_min',
'rh_tmax_SILO': 'rh_max',
'vp_deficit_SILO': 'vp_def'
})
data1 = data[['tmin', 'tmax', 'sol_rad', 'rh_min', 'rh_max']]
eto_model = HargreavesSamani(data1, units=units, constants=constants, verbosity=2)
et_hs = eto_model()
eto_model = ETBase(data1, units=units, constants=constants, verbosity=2)
et_jh = eto_model()
eto_model = Penman(data1, units=units, constants=constants, verbosity=2)
et_penman = eto_model()
et_penman = np.where(et_penman<0.0, np.nan, et_penman)
eto_model = PriestleyTaylor(data1, units=units, constants=constants, verbosity=2)
et_pt = eto_model()
plt.close('all')
fig, (ax1, ax2, ax3, ax4) = plt.subplots(4, sharex='all')
process_axis(ax1,data['et_morton_point_SILO'], color= np.array([0.63797563, 0.05503074, 0.07078517]),
label='Morton', ms=0.5, ylabel='ETP (mm)', legend_kws={"markerscale":4, "loc":"upper right"})
process_axis(ax2, pd.Series(et_hs, data.index), ms=0.5, color=np.array([0.70670405, 0.71039014, 0.54375619]),
label='Hargreaves and Samani', ylabel='ETP (mm)', legend_kws={"markerscale":4, "loc":"upper right"})
process_axis(ax3, et_jh, ms=0.5, color=np.array([0.27822191, 0.7608274, 0.89536561]),
label='Jensen and Haise', ylabel='ETP (mm)', legend_kws={"markerscale":4, "loc":"upper right"})
process_axis(ax4, pd.Series(et_penman, index=data.index), ms=0.5, color=np.array([0.39865179, 0.61455622, 0.57515074]),
label='Penman', ylabel='ETP (mm)', legend_kws={"markerscale":4, "loc":"upper right"})
plt.show() | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/examples/paper/daily_et_methods.py | daily_et_methods.py |
[compare machine learning algos](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/compare_ml.ipynb)
[interpretable_dl](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/interpretability.ipynb)
[hru discretization laos](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/hru_discretization_laos.ipynb)
[hru discretization](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/hru_discretization.ipynb)
[ecoli modeling with transformations](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/ecoli_modeling_with_transformations.ipynb)
[reading input data from different file types](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/input_data_file_types.ipynb)
[Model Explanation using LIME](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/lime_explainations.ipynb)
[Model Explanation using SHAP](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/shap_explainations.ipynb)
[Visualizing model](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/visualize.ipynb)
[customizing loss function](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/customizing_loss_function.ipynb)
[customizing train step](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/customizing_train_step.ipynb)
[customizing training step in functional api](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/customizing_train_step_with_functional_api.ipynb)
[customizing training loop](https://nbviewer.jupyter.org/github/AtrCheema/AI4Water/blob/dev/examples/paper/customizing_training_loop.ipynb) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/examples/paper/readme.md | readme.md |
<a href="https://colab.research.google.com/github/AtrCheema/AI4Water/blob/dev/examples/paper/input_data_file_types.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
This notebook shows how AI4Water can read data from different file formats.
If the data is arranged properly in a tabular format, then AI4Water can read data from one of the following file formats:
| file extension | file type |
|----------------|-----------|
| .csv | comma separated file |
| .xlsx | microsoft excel |
| .parquet | parquet|
| .feather | feather|
| .nc | netCDF |
| .mat | matlab file|
| .npz | numpy compressed file|
We will first save the data in the above mentioned file formats and then show how AI4Water can read data from those files.
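Regardless of the file type, the path to the file is simply passed to the `data` argument of the `fit` method. The general pattern, demonstrated for each format below, looks like this (the file name and the feature names here are illustrative only):
```
# illustrative sketch only; 'data.csv' and the feature names are assumptions
from ai4water import Model

model = Model(model="RandomForestRegressor",
              input_features=["temp", "rel_hum"],
              output_features=["Ecoli_mpn100"])
model.fit(data="data.csv")
```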
```
try:
import ai4water
except (ImportError, ModuleNotFoundError):
!python -m pip install "AI4Water[all] @ git+https://github.com/AtrCheema/AI4Water.git"
import os
import scipy
import numpy as np
from ai4water import Model
from ai4water.datasets import MtropicsLaos
laos = MtropicsLaos()
data = laos.make_regression()
type(data)
data.shape
data.head()
data.tail()
```
First use the above DataFrame as input to Model
```
model = Model(model="RandomForestRegressor")
model.fit(data=data)
train_x, train_y = model.training_data()
print(train_x.shape, train_y.shape)
val_x, val_y = model.validation_data()
print(val_x.shape, val_y.shape)
test_x, test_y = model.test_data()
print(test_x.shape, test_y.shape)
```
Now saving the data into different file formats
```
csv_fname = os.path.join(os.getcwd(), "data.csv")
data.to_csv(csv_fname)
xlsx_fname = os.path.join(os.getcwd(), "data.xlsx")
data.to_excel(xlsx_fname, engine="xlsxwriter")
parq_fname = os.path.join(os.getcwd(), "data.parquet")
data.to_parquet(parq_fname)
feather_fname = os.path.join(os.getcwd(), "data.feather")
data.reset_index().to_feather(feather_fname)
nc_fname = os.path.join(os.getcwd(), "data.nc")
xds = data.to_xarray()
xds.to_netcdf(nc_fname)
npz_fname = os.path.join(os.getcwd(), "data.npz")
np.savez(npz_fname, data.values)
mat_fname = os.path.join(os.getcwd(), "data.mat")
scipy.io.savemat(mat_fname, {'data': data.values})
```
# csv file
```
model = Model(model="RandomForestRegressor",
input_features=['temp', 'rel_hum', 'wind_speed', 'sol_rad',
'water_level', 'pcp', 'susp_pm'],
output_features=['Ecoli_mpn100'])
model.fit(data=csv_fname)
train_x, train_y = model.training_data()
print(train_x.shape, train_y.shape)
val_x, val_y = model.validation_data()
print(val_x.shape, val_y.shape)
test_x, test_y = model.test_data()
print(test_x.shape, test_y.shape)
```
# xlsx file
```
model = Model(model="RandomForestRegressor",
input_features=['temp', 'rel_hum', 'wind_speed', 'sol_rad',
'water_level', 'pcp', 'susp_pm'],
output_features=['Ecoli_mpn100'])
model.fit(data=xlsx_fname)
train_x, train_y = model.training_data()
print(train_x.shape, train_y.shape)
val_x, val_y = model.validation_data()
print(val_x.shape, val_y.shape)
test_x, test_y = model.test_data()
print(test_x.shape, test_y.shape)
```
# parquet file
```
model = Model(model="RandomForestRegressor",
input_features=['temp', 'rel_hum', 'wind_speed', 'sol_rad',
'water_level', 'pcp', 'susp_pm'],
output_features=['Ecoli_mpn100'])
model.fit(data=parq_fname)
train_x, train_y = model.training_data()
print(train_x.shape, train_y.shape)
val_x, val_y = model.validation_data()
print(val_x.shape, val_y.shape)
test_x, test_y = model.test_data()
print(test_x.shape, test_y.shape)
```
# feather file
```
model = Model(model="RandomForestRegressor",
input_features=['temp', 'rel_hum', 'wind_speed', 'sol_rad',
'water_level', 'pcp', 'susp_pm'],
output_features=['Ecoli_mpn100'])
model.fit(data=feather_fname)
train_x, train_y = model.training_data()
print(train_x.shape, train_y.shape)
val_x, val_y = model.validation_data()
print(val_x.shape, val_y.shape)
test_x, test_y = model.test_data()
print(test_x.shape, test_y.shape)
```
# netcdf file
```
model = Model(model="RandomForestRegressor",
input_features=['temp', 'rel_hum', 'wind_speed', 'sol_rad',
'water_level', 'pcp', 'susp_pm'],
output_features=['Ecoli_mpn100'])
model.fit(data=nc_fname)
train_x, train_y = model.training_data()
print(train_x.shape, train_y.shape)
val_x, val_y = model.validation_data()
print(val_x.shape, val_y.shape)
test_x, test_y = model.test_data()
print(test_x.shape, test_y.shape)
```
# npz file
```
model = Model(model="RandomForestRegressor",
input_features=['temp', 'rel_hum', 'wind_speed', 'sol_rad',
'water_level', 'pcp', 'susp_pm'],
output_features=['Ecoli_mpn100'])
model.fit(data=npz_fname)
train_x, train_y = model.training_data()
print(train_x.shape, train_y.shape)
val_x, val_y = model.validation_data()
print(val_x.shape, val_y.shape)
test_x, test_y = model.test_data()
print(test_x.shape, test_y.shape)
```
# mat file
```
model = Model(model="RandomForestRegressor",
input_features=['temp', 'rel_hum', 'wind_speed', 'sol_rad',
'water_level', 'pcp', 'susp_pm'],
output_features=['Ecoli_mpn100'])
```
The model can be trained and used for prediction; internally the Model class will take care of feeding the right data to the machine learning model.
```
model.fit(data=mat_fname)
train_x, train_y = model.training_data()
print(train_x.shape, train_y.shape)
val_x, val_y = model.validation_data()
print(val_x.shape, val_y.shape)
test_x, test_y = model.test_data()
print(test_x.shape, test_y.shape)
model.predict(data='training')
model.predict()
```
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/examples/paper/input_data_file_types.ipynb | input_data_file_types.ipynb |
<a href="https://colab.research.google.com/github/AtrCheema/AI4Water/blob/dev/examples/paper/customizing_training_loop.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
This file describes a minimal example of customizing the whole training function `fit`
using AI4Water's `Model` class.
```
try:
    import ai4water
except (ImportError, ModuleNotFoundError):
!python -m pip install "AI4Water[all] @ git+https://github.com/AtrCheema/AI4Water.git"
import tensorflow as tf
tf.__version__
from ai4water import Model
from ai4water.datasets import busan_beach
# TODO put code in @tf.function
# TODO write validation code
class CustomModel(Model):
def fit(self,
x=None,
y=None,
data='training',
callbacks=None,
**kwargs):
self.is_training = True
# Instantiate an optimizer.
optimizer = self.get_optimizer()
# Instantiate a loss function.
if self.api == 'functional':
loss_fn = self.loss()
_model = self._model
else:
loss_fn = self.loss
_model = self
# Prepare the training dataset.
batch_size = self.config['batch_size']
train_x, train_label = self.training_data(x=x,y=y,data=data)
train_dataset = tf.data.Dataset.from_tensor_slices((train_x, train_label))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
for epoch in range(self.config['epochs']):
print("\nStart of epoch %d" % (epoch,))
# Iterate over the batches of the dataset.
for step, (x_batch_train, full_outputs) in enumerate(train_dataset):
# Open a GradientTape to record the operations run
# during the forward pass, which enables autodifferentiation.
with tf.GradientTape() as tape:
# Run the forward pass of the layer.
# The operations that the layer applies
# to its inputs are going to be recorded
# on the GradientTape.
mask = tf.greater(tf.reshape(full_outputs, (-1,)), 0.0) # (batch_size,)
y_obj = full_outputs[mask] # (vals_present, 1)
if y_obj.shape[0] < 1: # no observations present for this batch so skip this
continue
logits = _model(x_batch_train, training=True) # Logits for this minibatch
logits_obj = logits[mask]
# Compute the loss value for this minibatch.
loss_value = tf.keras.backend.mean(loss_fn(y_obj, logits_obj))
# Use the gradient tape to automatically retrieve
# the gradients of the trainable variables with respect to the loss.
grads = tape.gradient(loss_value, _model.trainable_weights)
# grads = [tf.clip_by_norm(g, 1.0) for g in grads]
grads = [tf.clip_by_value(g, -1.0, 1.0) for g in grads]
# Run one step of gradient descent by updating
# the value of the variables to minimize the loss.
optimizer.apply_gradients(zip(grads, _model.trainable_weights))
                # Log every 20 batches.
                if step % 20 == 0:
                    print(f"Training loss at batch {step} is {loss_value} after seeing {(step + 1) * batch_size} examples")
return loss_value
layers = {"LSTM_0": {'units': 64, 'return_sequences': True},
"LSTM_1": 32,
"Dropout": 0.3,
"Dense": 1
}
beach_data = busan_beach()
input_features = beach_data.columns.tolist()[0:-1]
output_features = beach_data.columns.tolist()[-1:]
model = CustomModel(model={'layers': layers},
batch_size=12,
ts_args={'lookback':15},
lr=8.95e-5,
allow_nan_labels=2,
epochs=10,
input_features=input_features,
output_features=output_features,
train_data='random'
)
history = model.fit(data=beach_data, callbacks={'tensorboard': True})
test_pred = model.predict()
train_pred = model.predict(data='training')
```
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/examples/paper/customizing_training_loop.ipynb | customizing_training_loop.ipynb |
<a href="https://colab.research.google.com/github/AtrCheema/AI4Water/blob/dev/examples/paper/customizing_train_step_with_functional_api.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
This file shows a minimal example of how to customize `train_step` using the functional API of AI4Water
```
try:
import ai4water
except (ImportError, ModuleNotFoundError):
!python -m pip install "AI4Water[all] @ git+https://github.com/AtrCheema/AI4Water.git"
import tensorflow as tf
tf.__version__
assert int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) >= 230, f"""This example is only tested with
tensorflow versions above 2.3.0. Your version is {tf.__version__}"""
from ai4water.functional import Model
from ai4water.datasets import busan_beach
class CustomModel(tf.keras.models.Model):
def train_step(self, data):
print('custom train_step')
# Unpack the data. Its structure depends on your model and
# on what you pass to `fit()`.
x, y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True) # Forward pass
# Compute the loss value
# (the loss function is configured in `compile()`)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
# Compute gradients
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
# Update weights
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics}
# Define Model
layers = {'Dense_0': {'config': {'units': 64, 'activation': 'relu'}},
'Dropout_0': {'config': {'rate': 0.3}},
'Dense_1': {'config': {'units': 32, 'activation': 'relu'}},
'Dropout_1': {'config': {'rate': 0.3}},
'Dense_2': {'config': {'units': 16, 'activation': 'relu'}},
'Dense_3': {'config': {'units': 9}},
'Reshape': {"target_shape": (9, 1)}
}
beach_data = busan_beach()
input_features = beach_data.columns.tolist()[0:-1]
output_features = beach_data.columns.tolist()[-1:]
model = Model(
model={"layers": {"Dense": 8, "Dense_1": 1}},
batch_size=32,
#lookback=1,
lr=8.95e-5,
epochs=2,
KModel=CustomModel,
input_features=input_features,
output_features=output_features,
train_data='random',
)
history = model.fit(data=beach_data)
# since the statement 'custom train_step' is printed, we have verified that tensorflow
# used our own customized train_step during training.
```
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/examples/paper/customizing_train_step_with_functional_api.ipynb | customizing_train_step_with_functional_api.ipynb |
from .backend import np, tf, keras
from ._main import BaseModel
from ai4water.tf_attributes import ACTIVATION_LAYERS, LAYERS, tcn, MULTI_INPUT_LAYERS
from .nn_tools import get_add_call_args, get_call_args
class Model(BaseModel):
"""
Model class with Functional API and inherits from `BaseModel`.
For ML/non-Neural Network based models, there is no difference in functional
    or subclassing api. For DL/NN-based models, this class implements the functional
    api and differs from the subclassing api in the internal implementation of the NN. This
    class is useful if you want to use the functional API of keras to build
    your own NN structure. In such a case you can construct your NN structure
    by overwriting `add_layers`. Another advantage of this class is that sometimes
    model subclassing is not possible, for example due to some bugs in tensorflow.
In such a case this class can be used. Otherwise all the features of ai4water
are available in this class as well.
Example:
        >>> from ai4water.functional import Model
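        >>> # a minimal, illustrative usage; the model type and dataset below
        >>> # are chosen for demonstration only
        >>> from ai4water.datasets import busan_beach
        >>> model = Model(model="RandomForestRegressor")
        >>> model.fit(data=busan_beach())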
"""
def __init__(self, *args, **kwargs):
"""
Initializes and builds the NN/ML model.
"""
self._go_up = True
super().__init__(*args, **kwargs)
def_KModel = None
if keras is not None:
def_KModel = keras.models.Model
self.KModel = kwargs.get('KModel', def_KModel)
self.build()
@property
def api(self):
return 'functional'
@property
def KModel(self):
"""sets k_model.
In case when we want to customize the model such as for implementing custom
`train_step`, we can provide the customized model as input the this Model
class
"""
return self._k_model
@KModel.setter
def KModel(self, x):
self._k_model = x
@property
def weights(self):
"""Returns names of weights in model."""
_ws = []
for w in self._model.weights:
_ws.append(w.name)
return _ws
@property
def layers(self):
if self.category == "ML":
raise NotImplementedError
return self._model.layers
@property
def inputs(self):
if self.category == "ML":
raise NotImplementedError
return self._model.inputs
@property
def outputs(self):
if self.category == "ML":
raise NotImplementedError
return self._model.outputs
@property
def output_shape(self)->tuple:
if self.category == "ML":
raise NotImplementedError
return self._model.output_shape
@property
def trainable_weights(self):
if self.category == "ML":
raise NotImplementedError
return self._model.trainable_weights
@property
def layer_names(self):
_all_layers = []
if self.category == "ML":
return None
for layer in self._model.layers:
_all_layers.append(layer.name)
return _all_layers
@property
def num_input_layers(self) -> int:
if self.category != "DL":
return np.inf
else:
return len(self._model.inputs)
@property
def input_layer_names(self) -> list:
return [lyr.name.split(':')[0] for lyr in self._model.inputs]
@property
def layers_out_shapes(self) -> dict:
""" returns shapes of outputs from all layers in model as dictionary"""
shapes = {}
for lyr in self._model.layers:
shapes[lyr.name] = lyr.output_shape
return shapes
@property
def layers_in_shapes(self) -> dict:
""" returns the shapes of inputs to all layers"""
shapes = {}
for lyr in self._model.layers:
shapes[lyr.name] = lyr.input_shape
return shapes
@property
def fit_fn(self):
return self._model.fit
@property
def evaluate_fn(self):
return self._model.evaluate
@property
def predict_fn(self):
return self._model.predict
def count_params(self):
if self.category == "ML":
raise NotImplementedError
return self._model.count_params()
def _get_dummy_input_shape(self):
shape = ()
if self.config['backend'] == 'tensorflow' and self.category == "DL":
if isinstance(self.model_.inputs, list):
if len(self.model_.inputs) == 1:
shape = self.model_.inputs[0].shape
else:
shape = [inp.shape for inp in self.model_.inputs]
return shape
def first_layer_shape(self):
""" instead of tuple, returning a list so that it can be moified if needed"""
if self.num_input_layers > 1:
shapes = {}
for lyr in self._model.inputs:
shapes[lyr.name] = lyr.shape
return shapes
shape = []
for idx, d in enumerate(self._model.layers[0].input.shape):
if int(tf.__version__[0]) == 1:
if isinstance(d, tf.Dimension): # for tf 1.x
d = d.value
if idx == 0: # the first dimension must remain undefined so that the user may define batch_size
d = -1
shape.append(d)
return shape
def add_layers(self, layers_config: dict, inputs=None):
"""
Builds the NN from dictionary.
Arguments:
            layers_config : dict whose keys can be one of the following:
`config`: `dict`/lambda, Every layer must contain initializing
arguments as `config` dictionary. The `config` dictionary
for every layer can contain `name` key and its value must be
`str` type. If `name` key is not provided in the config,
                    the provided layer name will be used as its name e.g. in the following case
layers = {'LSTM': {'config': {'units': 16}}}
                    the name of the `LSTM` layer will be `LSTM` while in the following case
layers = {'LSTM': {'config': {'units': 16, 'name': 'MyLSTM'}}}
the name of the lstm will be `MyLSTM`.
`inputs`: str/list, The calling arguments for the list. If `inputs`
key is missing for a layer, it will be supposed that either
this is an Input layer or it uses previous outputs as inputs.
                `outputs`: str/list We can specify the outputs from a layer
                    by using the `outputs` key. The value to `outputs` must be a
                    string or list of strings specifying the name of outputs from
                    the current layer which can be used later in the model.
`call_args`: str/list We can also specify additional call arguments
by `call_args` key. The value to `call_args` must be a string
or a list of strings.
            inputs : if None, it will be assumed that the `Input` layer either
                exists in `layers_config` or an Input layer will be created
                within this method before adding any other layer. If not None,
                then it must be an `Input` layer and the remaining NN architecture
                will be built as defined in `layers_config`. This can be handy
                when we want to use this method several times to build a complex
                or parallel NN structure. Avoid `Input` in layer names.
Returns:
inputs :
outputs :
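        Example:
            a minimal, illustrative sketch (the layer names and shapes below are
            assumptions for demonstration only), e.g. when building the network
            from a customized `add_layers`
            >>> layers = {"Input": {"shape": (5, 3)},
            ...           "LSTM": {"units": 16},
            ...           "Dense": {"units": 1}}
            >>> inputs, outputs = self.add_layers(layers)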
"""
lyr_cache = {}
wrp_layer = None # indicator for wrapper layers
first_layer = True
idx = 0
for lyr, lyr_args in layers_config.items():
            idx += 1
if callable(lyr) and hasattr(lyr, '__call__'):
LAYERS[lyr.__name__] = lyr
self.config['model']['layers'] = update_layers_config(layers_config, lyr)
lyr = lyr.__name__
lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)
if callable(lyr) and not hasattr(lyr, '__call__'):
lyr = "lambda"
lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)
# may be user has defined layers without input layer, in this case add Input layer as first layer
if first_layer:
if inputs is not None: # This method was called by providing it inputs.
assert isinstance(inputs, tf.Tensor)
lyr_cache["Input"] = inputs
# since inputs have been defined, all the layers that will be added will be next to first layer
first_layer = False
layer_outputs = inputs
assign_dummy_name(layer_outputs, 'input')
elif lyr_name != "Input":
if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer
layer_outputs = LAYERS["Input"](shape=lyr_config['input_shape'])
assign_dummy_name(layer_outputs, 'input')
else:
# for simple dense layer based models, lookback will not be used
def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)
layer_outputs = LAYERS["Input"](shape=def_shape)
# first layer is built so next iterations will not be for first layer
first_layer = False
# put the first layer in memory to be used for model compilation
lyr_cache["Input"] = layer_outputs
                    # add the layer which the user had specified as first layer
assign_dummy_name(layer_outputs, 'input')
if lyr_inputs is None: # The inputs to the layer have not been specified, so either it is an Input layer
# or it uses the previous outputs as inputs
if lyr_name == "Input":
# it is an Input layer, hence should not be called
layer_outputs = LAYERS[lyr_name](*args, **lyr_config)
assign_dummy_name(layer_outputs, 'input')
else:
# it is executable and uses previous outputs as inputs
if lyr_name in ACTIVATION_LAYERS:
layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])(layer_outputs)
elif lyr_name in ['TimeDistributed', 'Bidirectional']:
wrp_layer = LAYERS[lyr_name]
lyr_cache[lyr_name] = wrp_layer
continue
elif "LAMBDA" in lyr_name.upper():
# lyr_config is serialized lambda layer, which needs to be deserialized
# by default the lambda layer takes the previous output as input
# however when `call_args` are provided, they overwrite the layer_outputs
if call_args is not None: # todo, add example in docs
layer_outputs = get_add_call_args(call_args, lyr_cache, lyr_config['name'])
layer_outputs = tf.keras.layers.deserialize(lyr_config)(layer_outputs)
                        # layers_config['lambda']['config'] still contains lambda, so we need to replace the python
# object (lambda) with the serialized version (lyr_config) so that it can be saved as json file.
layers_config[lyr]['config'] = lyr_config
else:
if wrp_layer is not None:
layer_outputs = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))(layer_outputs)
wrp_layer = None
else:
add_args = get_add_call_args(call_args, lyr_cache, lyr_config['name'])
layer_initialized = LAYERS[lyr_name](*args, **lyr_config)
layer_outputs = layer_initialized(layer_outputs, **add_args)
self.get_and_set_attrs(layer_initialized)
else: # The inputs to this layer have been specified so they must exist in lyr_cache.
# it is an executable
if lyr_name in ACTIVATION_LAYERS:
call_args, add_args = get_call_args(lyr_inputs, lyr_cache, call_args, lyr_config['name'])
layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])(call_args, **add_args)
elif lyr_name in ['TimeDistributed', 'Bidirectional']:
wrp_layer = LAYERS[lyr_name]
lyr_cache[lyr_name] = wrp_layer
continue
elif "LAMBDA" in lyr_name.upper():
call_args, add_args = get_call_args(lyr_inputs, lyr_cache, call_args, lyr_config['name'])
layer_outputs = tf.keras.layers.deserialize(lyr_config)(call_args)
layers_config[lyr]['config'] = lyr_config
else:
if wrp_layer is not None:
call_args, add_args = get_call_args(lyr_inputs, lyr_cache, call_args, lyr_config['name'])
layer_outputs = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))(call_args, **add_args)
wrp_layer = None
else:
call_args, add_args = get_call_args(lyr_inputs, lyr_cache, call_args, lyr_config['name'])
layer_initialized = LAYERS[lyr_name](*args, **lyr_config)
                        # for multi-input layers inputs should be ([a,b,c]) instead of (a,b,c)
if isinstance(lyr_inputs, list) and lyr_name in MULTI_INPUT_LAYERS:
layer_outputs = layer_initialized(*call_args, **add_args)
else:
layer_outputs = layer_initialized(call_args, **add_args)
self.get_and_set_attrs(layer_initialized)
if activation is not None: # put the string back to dictionary to be saved in config file
lyr_config['activation'] = activation
if named_outs is not None:
if isinstance(named_outs, (list, tuple)) or named_outs.__class__.__name__ in ["ListWrapper"]:
# this layer is returning more than one output
assert len(named_outs) == len(layer_outputs), "Layer {} is expected to return {} " \
"outputs but it actually returns " \
"{}".format(lyr_name, named_outs, layer_outputs)
for idx, out_name in enumerate(named_outs):
self.update_cache(lyr_cache, out_name, layer_outputs[idx])
else:
# this layer returns just one output, TODO, this might be re
self.update_cache(lyr_cache, named_outs, layer_outputs)
self.update_cache(lyr_cache, lyr_config['name'], layer_outputs)
first_layer = False
self.jsonize_lyr_config(lyr_config)
inputs = []
for k, v in lyr_cache.items():
# since the model is not build yet and we have access to only output tensors of each list, this is probably
# the only way to know that how many `Input` layers were encountered during the run of this method. Each
# tensor (except TimeDistributed) has .op.inputs attribute,
# which is empty if a tensor represents output of Input layer.
if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:
if k != "TimeDistributed" and hasattr(v, 'op'):
if hasattr(v.op, 'inputs'):
_ins = v.op.inputs
if len(_ins) == 0:
inputs.append(v)
# not sure if this is the proper way of checking if a layer receives an input or not!
else:
if hasattr(v, '__dummy_name'):
inputs.append(v)
# for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use
# {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning
if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:
if len(layer_outputs.op.inputs) < 1:
print("Warning: the output is of Input tensor class type")
else:
if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node
print("Warning: the output is of Input tensor class type")
return inputs, layer_outputs
def compile(self, model_inputs, outputs, **compile_args):
k_model = self.KModel(inputs=model_inputs, outputs=outputs)
k_model.compile(loss=self.loss(), optimizer=self.get_optimizer(), metrics=self.get_metrics(), **compile_args)
if self.verbosity > 0:
k_model.summary()
if self.verbosity >= 0:
self.plot_model(k_model)
return k_model
def build(self, input_shape=None):
self.print_info()
if self.category == "DL":
if self.config.get('model', None) is None:
lyrs = None
else:
lyrs = self.config['model']['layers']
inputs, predictions = self.add_layers(lyrs)
self._model = self.compile(inputs, predictions)
self.info['model_parameters'] = int(self._model.count_params()) if self._model is not None else None
if self.verbosity > 0 and self.config['model'] is not None:
if 'tcn' in self.config['model']['layers']:
if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) >= 250:
# tf >= 2.5 does not have _layers and tcn uses _layers
setattr(self._model, '_layers', self._model.layers)
tcn.tcn_full_summary(self._model, expand_residual_blocks=True)
else:
self.build_ml_model()
if not getattr(self, 'from_check_point', False) and self.verbosity>=0:
# fit may fail so better to save config before as well. This will be overwritten once the fit is complete
self.save_config()
self.update_info()
return
def loss_name(self):
if isinstance(self._model.loss, str):
return self._model.loss
elif hasattr(self._model.loss, 'name'):
return self._model.loss.name
else:
return self._model.loss.__name__
def update_layers_config(layers_config, lyr):
new_config = {}
for k, v in layers_config.items():
if k == lyr:
new_config[lyr.__name__] = v
else:
new_config[k] = v
return new_config
def assign_dummy_name(tensor, dummy_name):
if isinstance(tensor, list):
for t in tensor:
setattr(t, '__dummy_name', dummy_name)
else:
setattr(tensor, '__dummy_name', dummy_name) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/functional.py | functional.py |
import math
import json
import time
import warnings
from types import MethodType
from pickle import PicklingError
from typing import Union, Callable, Tuple, List
from SeqMetrics import RegressionMetrics, ClassificationMetrics
from .nn_tools import NN
from .utils.utils import mad
from .utils.utils import make_model
from .utils.utils import AttribtueSetter
from .utils.utils import get_values
from .utils.utils import DataNotFound
from .utils.utils import maybe_create_path, dict_to_file, dateandtime_now
from .utils.utils import find_best_weight, reset_seed, update_model_config, METRIC_TYPES
from .utils.utils import maybe_three_outputs, get_version_info
from .postprocessing.utils import LossCurve
from .postprocessing import ProcessPredictions
from .postprocessing import feature_interaction
from .postprocessing import prediction_distribution_plot
from .preprocessing import DataSet
from .preprocessing.dataset._main import _DataSet
from .preprocessing.transformations import Transformations
from .models._tensorflow.custom_training import train_step, test_step
import ai4water.backend as K
from .backend import sklearn_models
from ai4water.backend import wandb, WandbCallback
from ai4water.backend import np, pd, plt, os, random
from .backend import tf, keras, torch, catboost_models, xgboost_models, lightgbm_models
if K.BACKEND == 'tensorflow' and tf is not None:
from ai4water.tf_attributes import LOSSES, OPTIMIZERS
elif K.BACKEND == 'pytorch' and torch is not None:
from ai4water.models._torch import LOSSES, OPTIMIZERS
class BaseModel(NN):
""" Model class that implements logic of AI4Water. """
def __init__(
self,
model: Union[dict, str, Callable] = None,
x_transformation: Union[str, dict, list] = None,
y_transformation:Union[str, dict, list] = None,
lr: float = 0.001,
optimizer='Adam',
loss: Union[str, Callable] = 'mse',
quantiles=None,
epochs: int = 14,
min_val_loss: float = 0.0001,
patience: int = 100,
save_model: bool = True,
monitor: Union[str, list] = None,
val_metric: str = None,
cross_validator: dict = None,
wandb_config: dict = None,
seed: int = 313,
prefix: str = None,
path: str = None,
verbosity: int = 1,
accept_additional_args: bool = False,
**kwargs
):
"""
The Model class can take a large number of possible arguments depending
upon the machine learning model/algorithm used. Not all the arguments
are applicable in each case. The user must define only the relevant/applicable
parameters and leave the others as it is.
Parameters
----------
model :
a dictionary defining machine learning model. If you are building
a non-neural network model then this dictionary must consist of
name of name of model as key and the keyword arguments to that
model as dictionary. For example to build a decision forest based model
>>> model = {'DecisionTreeRegressor': {"max_depth": 3,
... "criterion": "mae"}}
The key 'DecisionTreeRegressor' should exactly match the name of
the model from one of following libraries
- `sklearn`_
- `xgboost`_
- `catboost`_
- `lightgbm`_
The value {"max_depth": 3, "criterion": "mae"} is another dictionary
which can be any keyword argument which the `model` (DecisionTreeRegressor
in this case) accepts. The user must refer to the documentation
of the underlying library (scikit-learn for DecisionTreeRegressor)
to find out complete keyword arguments applicable for a particular model.
See `examples <https://ai4water.readthedocs.io/en/latest/auto_examples/dec_model_def_ml.html>`_
to learn how to build machine learning models
If You are building a Deep Learning model using tensorflow, then the key
must be 'layers' and the value must itself be a dictionary defining layers
of neural networks. For example we can build an MLP as following
>>> model = {'layers': {
... "Dense_0": {'units': 64, 'activation': 'relu'},
... "Flatten": {},
... "Dense_3": {'units': 1}
>>> }}
The MLP in this case consists of dense, and flatten layers. The user
can define any keyword arguments which is accepted by that layer in
TensorFlow. For example the `Dense` layer in TensorFlow can accept
`units` and `activation` keyword argument among others. For details
on how to buld neural networks using such layered API
`see examples <https://ai4water.readthedocs.io/en/dev/declarative_def_tf.html>`_
x_transformation:
type of transformation to be applied on x/input data.
The transformation can be any transformation name from
:py:class:`ai4water.preprocessing.transformations.Transformation` .
The user can specify more than
one transformation. Moreover, the user can also determine which
transformation to be applied on which input feature. Default is 'minmax'.
To apply a single transformation on all the data
>>> x_transformation = 'minmax'
To apply different transformations on different input and output features
>>> x_transformation = [{'method': 'minmax', 'features': ['input1', 'input2']},
... {'method': 'zscore', 'features': ['input3', 'input4']}
... ]
Here `input1`, `input2`, `input3` and `input4` are the columns in the
`data`. For more info see :py:class:`ai4water.preprocessing.Transformations`
and :py:class:`ai4water.preprocessing.Transformation` classes.
y_transformation:
type of transformation to be applied on y/label/output data.
        lr : float, default 0.001
            learning rate.
optimizer : str/keras.optimizers like
the optimizer to be used for neural network training. Default is 'Adam'
loss : str/callable Default is `mse`.
the cost/loss function to be used for training neural networks.
quantiles : list Default is None
quantiles to be used when the problem is quantile regression.
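            For example, to predict the 10th, 50th and 90th percentiles
            >>> quantiles = [0.1, 0.5, 0.9]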
epochs : int Default is 14
number of epochs to be used.
min_val_loss : float Default is 0.0001.
            minimum value of validation loss/error to be used for early stopping.
patience : int
number of epochs to wait before early stopping. Set this value to None
if you don't want to use EarlyStopping.
save_model : bool
whether to save the model or not. For neural networks, the model will
            be saved only if an improvement in training/validation loss is observed.
Otherwise model is not saved.
monitor : str/list
metrics to be monitored. e.g. ['nse', 'pbias']
val_metric : str
performance metric to be used for validation/cross_validation.
            This metric will be used for hyper-parameter optimization and
experiment comparison. If not defined then
r2_score_ will be used for regression and accuracy_ will be used
for classification.
cross_validator : dict
selects the type of cross validation to be applied. It can be any
            cross validator from sklearn.model_selection. Default is None, which
means validation will be done using `validation_data`. To use
kfold cross validation,
>>> cross_validator = {'KFold': {'n_splits': 5}}
batches : str
            either `2d` or `3d`.
wandb_config : dict
Only valid if wandb package is installed. Default value is None,
which means, wandb will not be utilized. For simplest case, pass
a dictionary with at least two keys namely `project` and `entity`.
Otherwise use a dictionary of all the
            arguments for wandb.init, wandb.log and WandbCallback. For
`training_data` and `validation_data` in `WandbCallback`, pass
`True` instead of providing a tuple as shown below
>>> wandb_config = {'entity': 'entity_name', 'project': 'project_name',
... 'training_data':True, 'validation_data': True}
        seed : int
random seed for reproducibility. This can be set to None. The seed
is set to `os`, `tf`, `torch` and `random` modules simultaneously.
Please note that this seed is not set for numpy because that
will result in constant sampling during hyperparameter optimization.
If you want to seed everything, then use following function
>>> model.seed_everything()
prefix : str
prefix to be used for the folder in which the results are saved.
default is None, which means within
./results/model_path
path : str/path like
if not given, new model_path path will not be created.
verbosity : int default is 1
determines the amount of information being printed. 0 means no
print information. Can be between 0 and 3. Setting this value to 0
            will also result in not showing some plots such as loss curve or
regression plot. These plots will only be saved in self.path.
accept_additional_args : bool Default is False
If you want to pass any additional argument, then this argument
            must be set to True, otherwise an error will be raised.
**kwargs:
keyword arguments for :py:meth:`ai4water.preprocessing.DataSet.__init__`
Note
-----
The transformations applied on `x` and `y` data using `x_transformation`
and `y_transformations` are part of **model**. See `transformation`_
Examples
-------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> df = busan_beach()
>>> ann = Model(input_features=df.columns.tolist()[0:-1],
... batch_size=16,
... output_features=df.columns.tolist()[-1:],
        ... model={'layers': {'Dense': 64, 'Dense_1': 1}},
... )
>>> history = ann.fit(data=df)
>>> y = ann.predict()
.. _sklearn:
https://scikit-learn.org/stable/modules/classes.html
.. _xgboost:
https://xgboost.readthedocs.io/en/stable/python/index.html
.. _catboost:
https://catboost.ai/en/docs/concepts/python-quickstart
.. _lightgbm:
https://lightgbm.readthedocs.io/en/latest/
.. _transformation:
https://stats.stackexchange.com/q/555839/314919
.. _RegressionMetrics:
https://seqmetrics.readthedocs.io/en/latest/rgr.html#regressionmetrics
.. _r2_score:
https://seqmetrics.readthedocs.io/en/latest/rgr.html#SeqMetrics.RegressionMetrics.r2_score
.. _accuracy:
https://seqmetrics.readthedocs.io/en/latest/cls.html#SeqMetrics.ClassificationMetrics.accuracy
"""
if self._go_up:
maker = make_model(
model=model,
x_transformation=x_transformation,
y_transformation=y_transformation,
prefix=prefix,
path=path,
verbosity=verbosity,
lr=lr,
optimizer=optimizer,
loss=loss,
quantiles=quantiles,
epochs=epochs,
min_val_loss=min_val_loss,
patience=patience,
save_model=save_model,
monitor=monitor,
val_metric=val_metric,
cross_validator=cross_validator,
accept_additional_args=accept_additional_args,
seed=seed,
wandb_config=wandb_config,
**kwargs
)
reset_seed(maker.config['seed'], os=os, random=random, tf=tf, torch=torch)
if tf is not None:
# graph should be cleared everytime we build new `Model` otherwise,
# if two `Models` are prepared in same
# file, they may share same graph.
tf.keras.backend.clear_session()
self.data_config = maker.data_config
self.opt_paras = maker.opt_paras
self._original_model_config = maker.orig_model
NN.__init__(self, config=maker.config)
self.path = None
if verbosity >= 0:
self.path = maybe_create_path(path=path, prefix=prefix)
self.config['path'] = self.path
self.verbosity = verbosity
self.category = self.config['category']
self.mode = self.config.get('mode', None)
self.info = {}
@property
def is_custom_model(self):
return self.config['is_custom_model_']
@property
def model_name(self)->str:
if self.config.get('model_name_', None):
return self.config['model_name_']
model_def = self.config['model']
if isinstance(model_def, str):
return model_def
elif isinstance(model_def, dict):
return list(model_def.keys())[0]
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, _mode=None):
from .experiments.utils import regression_models, classification_models
if _mode:
pass
elif self.config['mode']:
_mode = self.config['mode']
elif self.is_custom_model:
if self.config['loss'] in ['sparse_categorical_crossentropy',
'categorical_crossentropy',
'binary_crossentropy']:
_mode = "classification"
else:
_mode = None
elif self.model_name is not None:
if self.model_name in classification_models():
_mode = "classification"
elif self.model_name in regression_models():
_mode = "regression"
elif 'class' in self.model_name.lower():
_mode = "classification"
elif "regr" in self.model_name.lower():
_mode = "regression"
elif self.config['loss'] in ['sparse_categorical_crossentropy',
'categorical_crossentropy',
'binary_crossentropy']:
_mode = "classification"
elif self.model_name == "layers":
# todo
_mode = "regression"
else:
raise NotImplementedError(f" Can't determine mode for {self.model_name}")
elif self.config['loss'] in ['sparse_categorical_crossentropy',
'categorical_crossentropy',
'binary_crossentropy']:
_mode = "classification"
elif self.model_name == "layers":
# todo
_mode = "regression"
else: # when model_name is None, mode should also be None.
_mode = None
# so that next time don't have to go through all these ifelse statements
self.config['mode'] = _mode
self.data_config['mode'] = _mode
self._mode = _mode
@property
def _estimator_type(self):
if self.mode == "regression":
return "regressor"
return "classifier"
@property
def input_features(self):
if hasattr(self, 'dh_'):
return self.dh_.input_features
return self.config['input_features']
@property
def num_ins(self): # raises error if input_features are not defined
return len(self.input_features)
@property
def output_features(self):
if hasattr(self, 'dh_'):
return self.dh_.output_features
return self.config['output_features']
@property
def num_outs(self):
return len(self.output_features)
@property
def forecast_len(self):
if hasattr(self, 'dh_'):
if isinstance(self.dh_, DataSet):
return self.dh_.ts_args['forecast_len']
else:
return {k: v['forecast_len'] for k, v in self.dh_.ts_args.items()}
return self.config['ts_args']['forecast_len']
@property
def val_metric(self):
if self.mode=='regression':
return self.config['val_metric'] or 'r2_score'
return self.config['val_metric'] or 'accuracy'
@property
def forecast_step(self):
if hasattr(self, 'dh_'):
return self.dh_.ts_args['forecast_step']
return self.config['ts_args']['forecast_step']
def _get_dummy_input_shape(self):
raise NotImplementedError
def build(self, *args, **kwargs):
raise NotImplementedError
@property
def quantiles(self):
return self.config['quantiles']
@property
def act_path(self):
return os.path.join(self.path, 'activations')
@property
def w_path(self):
return os.path.join(self.path, 'weights')
@property
def data_path(self):
return os.path.join(self.path, 'data')
def loss_name(self):
raise NotImplementedError
@property
def teacher_forcing(self): # returns None if undefined
if hasattr(self, 'dh_'):
return self.dh_.teacher_forcing
return self.config['teacher_forcing']
def nn_layers(self):
if hasattr(self, 'layers'):
return self.layers
elif hasattr(self._model, 'layers'):
return self._model.layers
else:
return None
@property
def ai4w_outputs(self):
"""alias for keras.MOdel.outputs!"""
if hasattr(self, 'outputs'):
return self.outputs
elif hasattr(self._model, 'outputs'):
return self._model.outputs
else:
return None
def seed_everything(self, seed = None)->None:
"""resets seeds of numpy, os, random, tensorflow, torch.
If any of these module is not available, the seed for that module
is not set."""
if seed is None:
seed = seed or self.config['seed'] or 313
reset_seed(seed=seed, os=os, np=np, tf=tf, torch=torch, random=random)
return
def trainable_parameters(self) -> int:
"""Calculates trainable parameters in the model
for more [see](https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/9)
"""
if self.config['backend'] == 'pytorch':
return sum(p.numel() for p in self.parameters() if p.requires_grad)
else:
if hasattr(self, 'count_params'):
return int(self.count_params())
else:
return int(self._model.count_params())
def loss(self):
# overwrite this function for a customized loss function.
# this function should return something which can be accepted as 'loss' by the keras Model.
# It can be a string or callable.
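        # e.g. a minimal, illustrative sketch of a subclass returning a custom
        # callable loss (the function below is an assumption, not part of ai4water):
        #     class MyModel(Model):
        #         def loss(self):
        #             def custom_mse(true, pred):
        #                 return tf.keras.backend.mean(tf.keras.backend.square(true - pred))
        #             return custom_mse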
if callable(self.config['loss']):
return self.config['loss']
if self.config['backend'] == 'pytorch':
return LOSSES[self.config['loss']]()
return LOSSES[self.config['loss']]
@property
def fit_fn(self):
# this points to the Keras's fit method
return NotImplementedError
@property
def evaluate_fn(self):
# this points to the Keras's evaluate method
return NotImplementedError
@property
def predict_fn(self, *args, **kwargs):
return NotImplementedError
@property
def api(self):
return NotImplementedError
@property
def input_layer_names(self):
return NotImplementedError
@property
def num_input_layers(self):
return NotImplementedError
@property
def layer_names(self):
return NotImplementedError
@property
def dl_model(self):
if self.api == "subclassing":
return self
else:
return self._model
def first_layer_shape(self):
return NotImplementedError
def get_callbacks(self, val_data=None, callbacks=None):
if self.config['backend'] == 'pytorch':
return self.cbs_for_pytorch(val_data, callbacks)
else:
return self.cbs_for_tf(val_data, callbacks)
def cbs_for_pytorch(self, *args, **kwargs):
"""Callbacks for pytorch training."""
return []
def cbs_for_tf(self, val_data=None, callbacks=None):
if callbacks is None:
callbacks = {}
# container to hold all callbacks
_callbacks = list()
_monitor = 'val_loss' if val_data is not None else 'loss'
fname = "{val_loss:.5f}.hdf5" if val_data is not None else "{loss:.5f}.hdf5"
if self.config['save_model'] and self.verbosity>=0:
_callbacks.append(keras.callbacks.ModelCheckpoint(
filepath=self.w_path + f"{os.sep}weights_" + "{epoch:03d}_" + fname,
save_weights_only=True,
monitor=_monitor,
mode='min',
save_best_only=True))
if self.config['patience']:
_callbacks.append(keras.callbacks.EarlyStopping(
monitor=_monitor, min_delta=self.config['min_val_loss'],
patience=self.config['patience'], verbose=0, mode='auto'
))
if 'tensorboard' in callbacks:
tb_kwargs = callbacks['tensorboard']
if 'log_dir' not in tb_kwargs: tb_kwargs['log_dir'] = self.path
_callbacks.append(keras.callbacks.TensorBoard(**tb_kwargs))
callbacks.pop('tensorboard')
if isinstance(callbacks, dict):
for val in callbacks.values():
_callbacks.append(val)
else:
# if any callback provided by user is similar to what we already prepared, take the one
# provided by the user
assert isinstance(callbacks, list)
cbs_provided = [cb.__class__.__name__ for cb in callbacks]
for cb in _callbacks:
if not cb.__class__.__name__ in cbs_provided:
callbacks.append(cb)
_callbacks = callbacks
return _callbacks
def get_val_data(self, validation_data=None):
"""Finds out if there is validation_data"""
user_defined = True
if validation_data is None:
# when validation data is not given in kwargs and validation_data method is overwritten
try:
validation_data = self.validation_data()
user_defined = False
# when x,y is user defined then validation_data() can give this error
except DataNotFound:
validation_data = None
if isinstance(validation_data, tuple):
x, y = validation_data
if x is None and y is None:
return None
elif hasattr(x, '__len__') and len(x)==0:
return None
else: # x,y is numpy array
if not user_defined and self.is_binary_:
if y.shape[1] > self.output_shape[1]:
y = np.argmax(y, 1).reshape(-1,1)
x = self._transform_x(x)
y = self._transform_y(y)
validation_data = x,y
elif validation_data is not None:
if self.config['backend'] == "tensorflow":
if isinstance(validation_data, tf.data.Dataset):
pass
elif validation_data.__class__.__name__ in ['TorchDataset', 'BatchDataset']:
pass
else:
                    raise ValueError(f'Unrecognizable validation data {validation_data.__class__.__name__}')
return validation_data
return validation_data
def _call_fit_fn(self, x, **kwargs):
"""
Some preprocessing before calling actual fit
        If nans are present in y, then tf.keras.Model.fit is called as it
        is, otherwise it is called with custom train_step and test_step which
        avoid calculating loss at points containing nans."""
if kwargs.pop('nans_in_y_exist'): # todo, for model-subclassing?
if not isinstance(x, tf.data.Dataset): # when x is tf.Dataset, we don't have y in kwargs
y = kwargs['y']
assert np.isnan(y).sum() > 0
kwargs['y'] = np.nan_to_num(y) # In graph mode, masking of nans does not work
self._model.train_step = MethodType(train_step, self._model)
self._model.test_step = MethodType(test_step, self._model)
return self.fit_fn(x, **kwargs)
def _fit(self,
inputs,
outputs,
validation_data=None,
validation_steps=None,
callbacks=None,
**kwargs):
nans_in_y_exist = False
if isinstance(outputs, np.ndarray):
if np.isnan(outputs).sum() > 0:
nans_in_y_exist = True
elif isinstance(outputs, list):
for out_array in outputs:
if np.isnan(out_array).sum() > 0:
nans_in_y_exist = True
elif isinstance(outputs, dict):
for out_array in outputs.values():
if np.isnan(out_array).sum() > 0:
nans_in_y_exist = True
validation_data = self.get_val_data(validation_data)
outputs = get_values(outputs)
if isinstance(validation_data, tuple):
# when val_outs is just a dictionary with 1 key/value pair,
# we just extract values and consider it validation_data
val_outs = validation_data[-1]
val_outs = get_values(val_outs)
validation_data = (validation_data[0], val_outs)
if K.BACKEND == 'tensorflow':
callbacks = self.get_wandb_cb(
callbacks,
train_data=(inputs, outputs),
validation_data=validation_data,
)
callbacks = self.get_callbacks(validation_data, callbacks=callbacks)
st = time.time()
# when data is given as generator (tf.data or torchDataset) then
        # we don't set batch size and don't give the y argument to fit
batch_size = self.config['batch_size']
y = outputs
if K.BACKEND == "tensorflow":
if isinstance(inputs, tf.data.Dataset):
batch_size = None
y = None
elif inputs.__class__.__name__ in ["TorchDataset"]:
batch_size = None
y = None
# natively prepared arguments
_kwargs = {
'x':inputs,
'y':y,
'epochs':self.config['epochs'],
'batch_size':batch_size,
'validation_data':validation_data,
'callbacks':callbacks,
'shuffle':self.config['shuffle'],
'steps_per_epoch':self.config['steps_per_epoch'],
'verbose':max(self.verbosity, 0),
'nans_in_y_exist':nans_in_y_exist,
'validation_steps':validation_steps,
}
# arguments explicitly provided by user during .fit will take priority
for k,v in kwargs.items():
if k in _kwargs:
if k in self.config: # also update config
self.config[k] = v
_kwargs.pop(k)
self._call_fit_fn(
**_kwargs,
**kwargs,
)
self.info['training_time_in_minutes'] = round(float(time.time() - st) / 60.0, 2)
return self.post_fit()
def get_wandb_cb(self, callback, train_data, validation_data) -> dict:
"""Makes WandbCallback and add it in callback"""
if callback is None:
callback = {}
self.use_wandb = False
if wandb is not None:
wandb_config: dict = self.config['wandb_config']
if wandb_config is not None:
self.use_wandb = True
for key in ['project', 'entity']:
assert key in wandb_config, f"wandb_config must have {key} key in it"
wandb.init(name=os.path.basename(self.path),
project=wandb_config.pop('project'),
notes=wandb_config.get('notes', f"{self.mode} with {self.config['backend']}"),
tags=['ai4water', self.api, self.category, self.mode],
entity=wandb_config.pop('entity'))
monitor = self.config.get('monitor', 'val_loss')
if 'monitor' in wandb_config:
monitor = wandb_config.pop('monitor')
add_train_data = False
if 'training_data' in wandb_config:
add_train_data = wandb_config.pop('training_data')
add_val_data = False
if 'validation_data' in wandb_config:
add_val_data = wandb_config.pop('validation_data')
assert callable(WandbCallback)
callback['wandb_callback'] = WandbCallback(monitor=monitor,
training_data=train_data if add_train_data else None,
validation_data=validation_data if add_val_data else None,
**wandb_config)
return callback
def post_fit_wandb(self):
"""does some stuff related to wandb at the end of training."""
if K.BACKEND == 'tensorflow' and self.use_wandb:
getattr(wandb, 'finish')()
return
def post_fit(self):
"""Does some stuff after Keras model.fit has been called"""
if K.BACKEND == 'pytorch':
history = self.torch_learner.history
elif hasattr(self, 'history'):
history = self.history
else:
history = self._model.history
if self.verbosity >= 0:
self.save_config(history.history)
# save all the losses or performance metrics
df = pd.DataFrame.from_dict(history.history)
df.to_csv(os.path.join(self.path, "losses.csv"))
return history
def build_ml_model(self):
"""Builds ml models
Models that follow sklearn api such as xgboost,
catboost, lightgbm and obviously sklearn.
"""
ml_models = {**sklearn_models, **xgboost_models, **catboost_models, **lightgbm_models}
estimator = list(self.config['model'].keys())[0]
kwargs = list(self.config['model'].values())[0]
if estimator in ['HistGradientBoostingRegressor', 'SGDRegressor', 'MLPRegressor']:
if self.config['val_fraction'] > 0.0:
kwargs.update({'validation_fraction': self.config['val_fraction']})
elif self.config['train_fraction'] < 1.0:
kwargs.update({'validation_fraction': 1.0 - self.config['train_fraction']})
# some algorithms allow detailed output during training, this is allowed when self.verbosity is > 1
if estimator in ['OneClassSVM']:
kwargs.update({'verbose': True if self.verbosity > 1 else False})
if estimator in ["CatBoostRegressor", "CatBoostClassifier"]:
# https://stackoverflow.com/a/52921608/5982232
if not any([arg in kwargs for arg in ['verbose', 'silent', 'logging_level']]):
if self.verbosity == 0:
kwargs['logging_level'] = 'Silent'
elif self.verbosity == 1:
kwargs['logging_level'] = 'Verbose'
else:
kwargs['logging_level'] = 'Info'
if 'random_seed' not in kwargs:
kwargs['random_seed'] = self.config['seed']
if estimator in ["XGBRegressor", "XGBClassifier"]:
if 'random_state' not in kwargs:
kwargs['random_state'] = self.config['seed']
# following sklearn based models accept random_state argument
if estimator in [
"AdaBoostRegressor",
"BaggingClassifier", "BaggingRegressor",
"DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor",
"ExtraTreesClassifier", "ExtraTreesRegressor",
"ElasticNet", "ElasticNetCV",
"GradientBoostingClassifier", "GradientBoostingRegressor",
"GaussianProcessRegressor",
"HistGradientBoostingClassifier", "HistGradientBoostingRegressor",
"LogisticRegression",
"Lars",
"Lasso",
"LassoCV",
"LassoLars",
"LinearSVR",
"MLPClassifier", "MLPRegressor",
"PassiveAggressiveClassifier", "PassiveAggressiveRegressor",
"RandomForestClassifier", "RandomForestRegressor",
"RANSACRegressor", "RidgeClassifier",
"SGDClassifier", "SGDRegressor",
"TheilSenRegressor",
]:
if 'random_state' not in kwargs:
kwargs['random_state'] = self.config['seed']
        # in sklearn version >1.0 precompute automatically becomes True
# which can raise error
if estimator in ["ElasticNetCV", "LassoCV"] and 'precompute' not in kwargs:
kwargs['precompute'] = False
self.residual_threshold_not_set_ = False
if estimator == "RANSACRegressor" and 'residual_threshold' not in kwargs:
self.residual_threshold_not_set_ = True
if estimator in ["LGBMRegressor", 'LGBMClassifier']:
if 'random_state' not in kwargs:
kwargs['random_state'] = self.config['seed']
if kwargs.get('boosting_type', None) == "rf" and 'bagging_freq' not in kwargs:
# https://github.com/microsoft/LightGBM/issues/1333
# todo, user must be notified
kwargs['bagging_freq'] = 1
kwargs['bagging_fraction'] = 0.5
if self.is_custom_model:
if hasattr(estimator, '__call__'): # initiate the custom model
model = estimator(**kwargs)
else:
if len(kwargs)>0:
raise ValueError("""Initiating args not allowed because you
provided initiated class in dictionary""")
model = estimator # custom model is already instantiated
# initiate the estimator/model class
elif estimator in ml_models:
model = ml_models[estimator](**kwargs)
else:
from .backend import sklearn, lightgbm, catboost, xgboost
version_info = get_version_info(sklearn=sklearn, lightgbm=lightgbm, catboost=catboost,
xgboost=xgboost)
if estimator in ['TweedieRegressor', 'PoissonRegressor', 'LGBMRegressor', 'LGBMClassifier',
'GammaRegressor']:
sk_maj_ver = int(sklearn.__version__.split('.')[0])
sk_min_ver = int(sklearn.__version__.split('.')[1])
if sk_maj_ver < 1 and sk_min_ver < 23:
raise ValueError(
f"{estimator} is available with sklearn version >= 0.23 but you have {version_info['sklearn']}")
raise ValueError(f"model '{estimator}' not found. {version_info}")
self._model = model
return
def fit(
self,
x=None,
y=None,
data: Union[np.ndarray, pd.DataFrame, "DataSet", str] = 'training',
callbacks: Union[list, dict] = None,
**kwargs
):
"""
Trains the model with data. The data is either ``x`` or it is taken from
``data`` by feeding it to DataSet.
Arguments:
x:
The input data consisting of input features. It can also be
tf.Dataset or TorchDataset.
y:
Correct labels/observations/true data corresponding to 'x'.
data :
                Raw data from which ``x``,``y`` pairs are prepared. This will be
                passed to :py:class:`ai4water.preprocessing.DataSet`.
                It can also be an instance of :py:class:`ai4water.preprocessing.DataSet` or
:py:class:`ai4water.preprocessing.DataSetPipeline`.
It can also be name of dataset from :py:attr:`ai4water.datasets.all_datasets`
callbacks:
Any callback compatible with keras. If you want to log the output
to tensorboard, then just use `callbacks={'tensorboard':{}}` or
to provide additional arguments
>>> callbacks={'tensorboard': {'histogram_freq': 1}}
kwargs :
Any keyword argument for the `fit` method of the underlying library.
                if 'x' is present in kwargs, that will take precedence over `data`.
Returns:
A keras history object in case of deep learning model with tensorflow
as backend or anything returned by `fit` method of underlying model.
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> model = Model(model="XGBRegressor")
>>> model.fit(data=busan_beach())
using your own data for training
>>> import numpy as np
>>> new_inputs = np.random.random((100, 10))
>>> new_outputs = np.random.random(100)
>>> model.fit(x=new_inputs, y=new_outputs)
"""
if x is not None:
assert y is not None
return self.call_fit(x=x, y=y, data=data, callbacks=callbacks, **kwargs)
def call_fit(self,
x=None,
y=None,
data='training',
callbacks=None,
**kwargs):
visualizer = LossCurve(path=self.path, show=bool(self.verbosity), save=bool(self.verbosity))
self.is_training = True
source = 'training'
if isinstance(data, str) and data in ['validation', 'test']:
source = data
inputs, outputs, _, _, user_defined_x = self._fetch_data(source, x, y, data)
if 'Dataset' in inputs.__class__.__name__:
pass
else:
AttribtueSetter(self, outputs)
num_examples = _find_num_examples(inputs)
if num_examples: # for tf.data, we can't find num_examples
self._maybe_reduce_nquantiles(num_examples)
# apply preprocessing/feature engineering if required.
inputs = self._fit_transform_x(inputs)
outputs = self._fit_transform_y(outputs)
outputs = self._verify_output_shape(outputs)
self.info['training_start'] = dateandtime_now()
if self.category == "DL":
history = self._fit(inputs,
outputs,
callbacks=callbacks,
**kwargs)
if self.verbosity >= 0:
visualizer.plot_loss(history.history)
# for -ve verbosity, weights are not saved! # todo should raise warning
self.load_best_weights()
else:
history = self.fit_ml_models(inputs, outputs, **kwargs)
self.info['training_end'] = dateandtime_now()
if self.verbosity >= 0:
self.save_config()
dict_to_file(os.path.join(self.path, 'info.json'), others=self.info)
self.is_training = False
return history
def _verify_output_shape(self, outputs):
"""verifies that shape of target/true/labels is correct"""
if isinstance(outputs, np.ndarray) and self.category == "DL":
if isinstance(self.ai4w_outputs, list):
assert len(self.ai4w_outputs) == 1
model_output_shape = tuple(self.ai4w_outputs[0].shape.as_list()[1:])
if getattr(self, 'quantiles', None) is not None:
assert model_output_shape[0] == len(self.quantiles) * self.num_outs
elif self.mode == 'classification':
activation = self.layers[-1].get_config()['activation']
if self.is_binary_:
if activation == "softmax":
assert model_output_shape[0] == self.num_classes_, f"""inferred number of classes are
{self.num_classes_} while model's output has {model_output_shape[0]} nodes """
else:
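# the binary targets appear to be one-hot encoded with more columns than
# the model's output layer has nodes; collapse them to class indices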
if outputs.shape[1] > model_output_shape[0]:
outputs = np.argmax(outputs, 1).reshape(-1, 1)
assert model_output_shape[0] == outputs.shape[1]
elif len(model_output_shape) != len(outputs.shape):
assert model_output_shape == outputs.shape[1:], f"""
ShapeMismatchError: Shape of model's output is {model_output_shape}
while the prepared targets have shape {outputs.shape[1:]}."""
return outputs
def load_best_weights(self) -> None:
if self.config['backend'] != 'pytorch':
# load the best weights so that the best weights can be used during model.predict calls
best_weights = find_best_weight(os.path.join(self.path, 'weights'))
if best_weights is None:
warnings.warn("best weights could not be found and are not loaded", UserWarning)
else:
self.allow_weight_loading = True
self.update_weights(os.path.join(self.w_path, best_weights))
return
def fit_ml_models(self, inputs, outputs, **kwargs):
# the following arguments apply only to neural networks, so we remove them
# from the config file to avoid confusing the reader
for arg in ["composite", "optimizer", "lr", "epochs"]:
if arg in self.config:
self.config.pop(arg)
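# a single-column target is flattened to 1D, as expected by sklearn-style estimators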
if len(outputs) == outputs.size:
outputs = outputs.reshape(-1, )
self._maybe_change_residual_threshold(outputs)
history = self._model.fit(inputs, outputs, **kwargs)
if self._model.__class__.__name__.startswith("XGB") and inputs.__class__.__name__ == "ndarray":
# by default, feature_names of the booster are set to f0, f1, ...
self._model.get_booster().feature_names = self.input_features
if self.verbosity >= 0:
self._save_ml_model()
return history
def _save_ml_model(self):
"""Saves the non-NN/ML models in the disk."""
import joblib  # some modules don't have joblib in their requires
fname = os.path.join(self.w_path, self.model_name)
if "tpot" not in self.model_name:
try:
joblib.dump(self._model, fname)
except PicklingError:
print(f"could not pickle {self.model_name} model")
return
def cross_val_score(
self,
x=None,
y=None,
data: Union[pd.DataFrame, np.ndarray, str] = None,
scoring: Union [str, list] = None,
refit: bool = False,
process_results:bool = False
) -> list:
"""computes cross validation score
Parameters
----------
x :
input data
y :
output corresponding to ``x``.
data :
raw unprepared data which will be given to :py:class:`ai4water.preprocessing.DataSet`
to prepare x,y from it.
scoring :
performance metric to use for cross validation.
If None, it will be taken from config['val_metric']
refit : bool, optional (default=False)
If True, the model will be trained on the whole training+validation
data after calculating cross validation score.
process_results : bool, optional
whether to process results at each cv iteration or not
Returns
-------
list
cross validation score for each of metric in scoring
Example
-------
>>> from ai4water.datasets import busan_beach
>>> from ai4water import Model
>>> model = Model(model="RandomForestRegressor",
>>> cross_validator={"KFold": {"n_splits": 5}})
>>> model.cross_val_score(data=busan_beach())
Note
----
Currently not working for deep learning models.
"""
if self.mode == "classification":
Metrics = ClassificationMetrics
else:
Metrics = RegressionMetrics
if scoring is None:
scoring = self.val_metric
if not isinstance(scoring, list):
scoring = [scoring]
scores = []
if self.config['cross_validator'] is None:
raise ValueError("Provide the `cross_validator` argument to the `Model` class upon initiation")
cross_validator = list(self.config['cross_validator'].keys())[0]
cross_validator_args = self.config['cross_validator'][cross_validator]
if data is None: # prepared data is given
from .utils.utils import TrainTestSplit
splitter = TrainTestSplit(test_fraction=1.0 - self.config['train_fraction'])
splits = getattr(splitter, cross_validator)(x, y, **cross_validator_args)
else: # we need to prepare data first as x,y
if callable(cross_validator):
splits = cross_validator(**cross_validator_args)
else:
ds = DataSet(data=data, **self.data_config)
splits = getattr(ds, f'{cross_validator}_splits')(**cross_validator_args)
for fold, ((train_x, train_y), (test_x, test_y)) in enumerate(splits):
verbosity = self.verbosity
self.verbosity = 0
# make a new classifier/regressor at every fold
self.build(self._get_dummy_input_shape())
self.verbosity = verbosity
if self.category == "ML":
self._maybe_change_residual_threshold(train_y)
self.fit(x=train_x, y=train_y.reshape(-1, ))
else:
self.fit(x=train_x, y=train_y)
# since we have access to true y, it is better to provide it
# it will be used for processing of results
pred = self.predict(x=test_x, y=test_y, process_results=process_results)
metrics = Metrics(test_y.reshape(-1, 1), pred)
val_scores = []
for score in scoring:
val_scores.append(getattr(metrics, score)())
scores.append(val_scores)
if self.verbosity > 0:
print(f'fold: {fold} val_score: {val_scores}')
if self.verbosity >= 0:
# save all the scores as json in the model path
cv_name = str(cross_validator)
fname = os.path.join(self.path, f'{cv_name}_scores.json')
with open(fname, 'w') as fp:
json.dump(scores, fp, indent=True)
## set it as class attribute so that it can be used
setattr(self, f'cross_val_scores', scores)
if refit:
self.fit_on_all_training_data(data=data)
if self.verbosity >= 0:
# even if we do not run .fit(), the model should still be saved to
# disk so that it can be used later
self._save_ml_model()
scores = np.array(scores)
cv_scores_ = np.nanmean(scores, axis=0)
max_val = max(cv_scores_)
avg_val = np.nanmean(cv_scores_).item()
if np.isinf(cv_scores_).any():
# if there is inf, we should not fill it with an arbitrarily large value
# (e.g. 999999999) but with the maximum value experienced so far
cv_scores_no_inf = cv_scores_.copy()
cv_scores_no_inf[np.isinf(cv_scores_no_inf)] = np.nan
cv_scores_no_inf[np.isnan(cv_scores_no_inf)] = np.nanmax(cv_scores_no_inf)
max_val = max(cv_scores_no_inf)
# check for both infinity and nans separately
# because they come due to different reasons
cv_scores = []
for cv_score, metric_name in zip(cv_scores_, scoring):
# math.isinf(np.nan) will be false therefore
# first check if cv_score is nan or not, if true, fill it with avg_val
if math.isnan(cv_score):
cv_score = fill_val(metric_name, default_min=avg_val)
# then check if cv_score is infinity; if so, fill it with max_val
elif math.isinf(cv_score):
cv_score = fill_val(metric_name, default_min=max_val)
cv_scores.append(cv_score)
return cv_scores
def fit_on_all_training_data(self, x=None, y=None, data=None, **kwargs):
"""
This function trains the model on training + validation data.
Parameters
----------
x :
x data which is supposed to be consisting of training and validation.
If not given, then ``data`` must be given.
y :
label/target data corresponding to x data.
data :
raw data which will be passed to :py:class:`ai4water.preprocessing.DataSet`
to get training and validation x,y pairs.
The x data from training and validation is concatenated.
Similarly, y data from training and validation is concatenated
**kwargs
any keyword arguments for ``fit`` method.
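Example
-------
A minimal sketch, assuming the busan_beach dataset:
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> model = Model(model="XGBRegressor")
>>> model.fit_on_all_training_data(data=busan_beach())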
"""
if data is None:
assert x is not None, f"""
if data is not given, both x, y pairs must be given.
The provided x,y pairs are of type {type(x)}, {type(y)} respectively."""
return self.fit(x=x, y=y, **kwargs)
x_train, y_train = self.training_data(data=data)
x_val, y_val = self.validation_data()
if isinstance(x_train, list):
x = []
for val in range(len(x_train)):
if x_val is not None:
_val = np.concatenate([x_train[val], x_val[val]])
x.append(_val)
else:
# no validation data for this input; use the training input as-is
_val = x_train[val]
x.append(_val)
y = y_train
if hasattr(y_val, '__len__') and len(y_val) > 0:
y = np.concatenate([y_train, y_val])
elif isinstance(x_train, np.ndarray):
x, y = x_train, y_train
# if no validation data is available, then use only training data
if x_val is not None:
if hasattr(x_val, '__len__') and len(x_val)>0:
x = np.concatenate([x_train, x_val])
y = np.concatenate([y_train, y_val])
else:
raise NotImplementedError
if 'Dataset' in x.__class__.__name__:
pass
else:
AttribtueSetter(self, y)
if self.is_binary_:
if len(y) != y.size: # when sigmoid is used for binary
# convert the output to 1d
y = np.argmax(y, 1).reshape(-1, 1)
return self.fit(x=x, y=y, **kwargs)
def _maybe_change_residual_threshold(self, outputs) -> None:
# https://stackoverflow.com/a/64396757/5982232
if self.residual_threshold_not_set_:
old_value = self._model.residual_threshold or mad(outputs.reshape(-1, ).tolist())
if np.isnan(old_value) or old_value < 0.001:
self._model.set_params(residual_threshold=0.001)
if self.verbosity > 0:
print(f"""changing residual_threshold from {old_value} to
{self._model.residual_threshold}""")
return
def score(self, x=None, y=None, data='test', **kwargs):
"""since preprocessing is part of Model, so the trained model with
sklearn as backend must also be able to apply preprocessing on inputs
before calculating score from sklearn. Currently it just calls the
`score` function of sklearn by first transforming x and y."""
if self.category == "ML" and hasattr(self, '_model'):
x,y, _, _, _ = self._fetch_data(data, x=x,y=y, data=data)
x = self._transform_x(x)
y = self._transform_y(y)
return self._model.score(x, y, **kwargs)
raise NotImplementedError("cannot calculate score")
def predict_proba(self, x=None, data='test', **kwargs):
"""since preprocessing is part of Model, so the trained model with
sklearn/xgboost/catboost/lgbm as backend must also be able to apply
preprocessing on inputs before calling predict_proba from underlying library.
Currently it just calls the `predict_proba` function of underlying library
by first transforming x
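Example
-------
A minimal sketch (hypothetical; assumes a trained classification model and a
user-prepared ``new_inputs`` array):
>>> probabilities = model.predict_proba(x=new_inputs)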
"""
if self.category == "ML" and hasattr(self, '_model'):
x, _, _, _, _ = self._fetch_data(data, x=x, data=data)
x = self._transform_x(x)
return self._model.predict_proba(x, **kwargs)
raise NotImplementedError("cannot calculate proba")
def predict_log_proba(self, x=None, data='test', **kwargs):
"""since preprocessing is part of Model, so the trained model with
sklearn/xgboost/catboost/lgbm as backend must also be able to apply
preprocessing on inputs before calling predict_log_proba from underlying library.
Currently it just calls the `log_proba` function of underlying library
by first transforming x
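Example
-------
A minimal sketch (hypothetical; assumes a trained classification model and a
user-prepared ``new_inputs`` array):
>>> log_probabilities = model.predict_log_proba(x=new_inputs)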
"""
if self.category == "ML" and hasattr(self, '_model'):
x, _, _, _, _ = self._fetch_data(data, x=x, data=data)
x = self._transform_x(x)
return self._model.predict_log_proba(x, **kwargs)
raise NotImplementedError("cannot calculate log_proba")
def evaluate(
self,
x=None,
y=None,
data=None,
metrics=None,
**kwargs
):
"""
Evaluates the performance of the model on a given data.
calls the ``evaluate`` method of underlying `model`. If the `evaluate`
method is not available in underlying `model`, then `predict` is called.
Arguments:
x:
inputs
y:
outputs/true data corresponding to `x`
data:
Raw unprepared data which will be fed to :py:class:`ai4water.preprocessing.DataSet`
to prepare x and y. If ``x`` and ``y`` are given, this argument will have no meaning.
metrics:
the metrics to evaluate. It can be a string indicating the metric to
evaluate. It can also be a list of metrics to evaluate. Any metric
name from RegressionMetrics_ or ClassificationMetrics_ can be given.
It can also be name of group of metrics to evaluate.
Following groups are available
- ``minimal``
- ``all``
- ``hydro_metrics``
If this argument is given, the `evaluate` function of the underlying class
is not called. Rather the model is evaluated manually for given metrics.
Otherwise, if this argument is not given, then evaluate method of underlying
model is called, if available.
kwargs:
any keyword argument for the `evaluate` method of the underlying
model.
Returns:
If `metrics` is not given then this method returns whatever is returned
by `evaluate` method of underlying model. Otherwise the model is evaluated
for given metric or group of metrics and the result is returned
Examples
--------
>>> import numpy as np
>>> from ai4water import Model
>>> from ai4water.models import MLP
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model=MLP(),
... input_features=data.columns.tolist()[0:-1],
... output_features=data.columns.tolist()[-1:])
>>> model.fit(data=data)
for evaluation on test data
>>> model.evaluate(data=data)
...
evaluate on any metric from SeqMetrics_ library
>>> model.evaluate(data=data, metrics='pbias')
...
... # to evaluate on custom data, the user can provide its own x and y
>>> new_inputs = np.random.random((10, 13))
>>> new_outputs = np.random.random((10, 1, 1))
>>> model.evaluate(new_inputs, new_outputs)
backward compatibility
Since the ai4water's Model is supposed to behave same as Keras' Model
the following expressions are equally valid.
>>> model.evaluate(x, y=y)
>>> model.evaluate(x=x, y=y)
.. _SeqMetrics:
https://seqmetrics.readthedocs.io/en/latest/index.html
.. _RegressionMetrics:
https://seqmetrics.readthedocs.io/en/latest/rgr.html#regressionmetrics
.. _ClassificationMetrics:
https://seqmetrics.readthedocs.io/en/latest/cls.html#classificationmetrics
"""
return self.call_evaluate(x=x, y=y, data=data, metrics=metrics, **kwargs)
def evaluate_on_training_data(self, data, metrics=None, **kwargs):
"""evaluates the model on training data.
Parameters
----------
data:
Raw unprepared data which will be fed to :py:class:`ai4water.preprocessing.DataSet`
to prepare x and y. If ``x`` and ``y`` are given, this argument will have no meaning.
metrics:
the metrics to evaluate. It can be a string indicating the metric to
evaluate. It can also be a list of metrics to evaluate. Any metric
name from RegressionMetrics_ or ClassificationMetrics_ can be given.
It can also be name of group of metrics to evaluate.
Following groups are available
- ``minimal``
- ``all``
- ``hydro_metrics``
If this argument is given, the `evaluate` function of the underlying class
is not called. Rather the model is evaluated manually for given metrics.
Otherwise, if this argument is not given, then evaluate method of underlying
model is called, if available.
kwargs:
any keyword argument for the `evaluate` method of the underlying
model.
Returns
-------
If `metrics` is not given then this method returns whatever is returned
by `evaluate` method of underlying model. Otherwise the model is evaluated
for given metric or group of metrics and the result is returned as float
or dictionary
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import MLP
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model=MLP(),
... input_features=data.columns.tolist()[0:-1],
... output_features=data.columns.tolist()[-1:])
>>> model.fit(data=data)
... # for evaluation on training data
>>> model.evaluate_on_training_data(data=data)
>>> model.evaluate(data=data, metrics='pbias')
"""
x, y = self.training_data(data=data)
return self.call_evaluate(x=x, y=y, metrics=metrics, **kwargs)
def evaluate_on_validation_data(self, data, metrics=None, **kwargs):
"""evaluates the model on validation data.
Parameters
----------
data:
Raw unprepared data which will be fed to :py:class:`ai4water.preprocessing.DataSet`
to prepare x and y. If ``x`` and ``y`` are given, this argument will have no meaning.
metrics:
the metrics to evaluate. It can be a string indicating the metric to
evaluate. It can also be a list of metrics to evaluate. Any metric
name from RegressionMetrics_ or ClassificationMetrics_ can be given.
It can also be name of group of metrics to evaluate.
Following groups are available
- ``minimal``
- ``all``
- ``hydro_metrics``
If this argument is given, the `evaluate` function of the underlying class
is not called. Rather the model is evaluated manually for given metrics.
Otherwise, if this argument is not given, then evaluate method of underlying
model is called, if available.
kwargs:
any keyword argument for the `evaluate` method of the underlying
model.
Returns
-------
If `metrics` is not given then this method returns whatever is returned
by `evaluate` method of underlying model. Otherwise the model is evaluated
for given metric or group of metrics and the result is returned as float
or dictionary
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import MLP
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model=MLP(),
... input_features=data.columns.tolist()[0:-1],
... output_features=data.columns.tolist()[-1:])
>>> model.fit(data=data)
... # for evaluation on validation data
>>> model.evaluate_on_validation_data(data=data)
>>> model.evaluate_on_validation_data(data=data, metrics='pbias')
"""
x, y = self.validation_data(data=data)
if not _find_num_examples(x):
raise DataNotFound("Validation")
return self.call_evaluate(x=x, y=y, metrics=metrics, **kwargs)
def evaluate_on_test_data(self, data, metrics=None, **kwargs):
"""evaluates the model on test data.
Parameters
----------
data:
Raw unprepared data which will be fed to :py:class:`ai4water.preprocessing.DataSet`
to prepare x and y. If ``x`` and ``y`` are given, this argument will have no meaning.
metrics:
the metrics to evaluate. It can be a string indicating the metric to
evaluate. It can also be a list of metrics to evaluate. Any metric
name from RegressionMetrics_ or ClassificationMetrics_ can be given.
It can also be name of group of metrics to evaluate.
Following groups are available
- ``minimal``
- ``all``
- ``hydro_metrics``
If this argument is given, the `evaluate` function of the underlying class
is not called. Rather the model is evaluated manually for given metrics.
Otherwise, if this argument is not given, then evaluate method of underlying
model is called, if available.
kwargs:
any keyword argument for the `evaluate` method of the underlying
model.
Returns
-------
If `metrics` is not given then this method returns whatever is returned
by `evaluate` method of underlying model. Otherwise the model is evaluated
for given metric or group of metrics and the result is returned as float
or dictionary
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import MLP
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model=MLP(),
... input_features=data.columns.tolist()[0:-1],
... output_features=data.columns.tolist()[-1:])
>>> model.fit(data=data)
... # for evaluation on test data
>>> model.evaluate_on_test_data(data=data)
>>> model.evaluate_on_test_data(data=data, metrics='pbias')
"""
x, y = self.test_data(data=data)
if not _find_num_examples(x):
raise DataNotFound("Test")
return self.call_evaluate(x=x, y=y, metrics=metrics, **kwargs)
def evaluate_on_all_data(self, data, metrics=None, **kwargs):
"""evaluates the model on all i.e. training+validation+test data.
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import MLP
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model=MLP(),
... input_features=data.columns.tolist()[0:-1],
... output_features=data.columns.tolist()[-1:])
>>> model.fit(data=data)
... # for evaluation on all data
>>> print(model.evaluate_on_all_data(data=data))
>>> print(model.evaluate_on_all_data(data=data, metrics='pbias'))
"""
x, y = self.all_data(data=data)
return self.call_evaluate(x=x, y=y, metrics=metrics, **kwargs)
def call_evaluate(self, x=None,y=None, data=None, metrics=None, **kwargs):
if x is None and data is None:
data = "test"
source = 'test'
if isinstance(data, str) and data in ['training', 'validation', 'test']:
source = data
warnings.warn(f"""
argument {data} is deprecated and will be removed in future. Please
use 'evaluate_on_{data}_data' method instead.""")
x, y, _, _, user_defined = self._fetch_data(source, x, y, data)
if user_defined:
pass
elif len(x) == 0 and source == "test":
warnings.warn("No test data found. using validation data instead",
UserWarning)
data = "validation"
source = data
x, y, _, _, _ = self._fetch_data(source=source, data=data)
if len(x) == 0:
warnings.warn("No test and validation data found. using training data instead",
UserWarning)
data = "training"
source = data
x, y, _, _, _ = self._fetch_data(source=source, x=x, y=y, data=data)
if not getattr(self, 'is_fitted', True):
AttribtueSetter(self, y)
# don't call the underlying evaluate function; rather, manually
# evaluate the given metrics
if metrics is not None:
return self._manual_eval(x, y, metrics)
# after this we call the evaluate function of the underlying model,
# therefore we must transform the inputs and outputs
x = self._transform_x(x)
y = self._transform_y(y)
if hasattr(self._model, 'evaluate'):
return self._model.evaluate(x, y, **kwargs)
return self.evaluate_fn(x, y, **kwargs)
def evalute_ml_models(self, x, y, metrics=None):
if metrics is None:
metrics = self.val_metric
return self._manual_eval(x, y, metrics)
def _manual_eval(self, x, y, metrics):
"""manual evaluation"""
t, p = self.predict(x=x, y=y, return_true=True, process_results=False)
if self.mode == "regression":
errs = RegressionMetrics(t, p)
else:
errs = ClassificationMetrics(t, p, multiclass=self.is_multiclass_)
if isinstance(metrics, str):
if metrics in ['minimal', 'all', 'hydro_metrics']:
results = getattr(errs, f"calculate_{metrics}")()
else:
results = getattr(errs, metrics)()
elif isinstance(metrics, list):
results = {}
for m in metrics:
results[m] = getattr(errs, m)()
elif callable(metrics):
results = metrics(x, t)
else:
raise ValueError(f"unknown metrics type {metrics}")
return results
def predict(
self,
x=None,
y=None,
data: Union[str, pd.DataFrame, np.ndarray, DataSet] = 'test',
process_results: bool = True,
metrics: str = "minimal",
return_true: bool = False,
plots:Union[str, list] = None,
**kwargs
):
"""
Makes prediction from the trained model.
Arguments:
x:
The data on which to make prediction. if given, it will override
`data`. It can also be tf.Dataset or TorchDataset
y:
Used for post-processing etc. If given, it will override `data`.
data:
It can also be unprepared/raw data which will be given to
:py:class:`ai4water.preprocessing.DataSet`
to prepare x,y values.
process_results: bool
post processing of results
metrics: str
only valid if process_results is True. The metrics to calculate.
Valid values are ``minimal``, ``all``, ``hydro_metrics``
return_true: bool
whether to return the true values along with predicted values
or not. Default is False, so that this method behaves like sklearn's predict.
plots : optional (default=None)
The kind of plots to draw. Only valid if ``process_results`` is True.
kwargs : any keyword argument for ``predict`` method.
Returns:
A numpy array of predicted values.
If return_true is True, then a tuple of arrays is returned. The first is
the true values and the second is the predicted values. If ``x`` is given
but ``y`` is not given, then the first returned array is None.
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> model = Model(model="RandomForestRegressor")
>>> model.fit(data=busan_beach())
>>> pred = model.predict(data=busan_beach())
get true values
>>> true, pred = model.predict(data=busan_beach(), return_true=True)
postprocessing of results
>>> pred = model.predict(data=busan_beach(), process_results=True)
calculate all metrics during postprocessing
>>> pred = model.predict(data=busan_beach(), process_results=True, metrics="all")
using your own data
>>> import numpy as np
>>> new_input = np.random.random((10, 13))
>>> pred = model.predict(x = new_input)
"""
assert metrics in ("minimal", "all", "hydro_metrics")
return self.call_predict(
x=x,
y=y,
data=data,
process_results=process_results,
metrics=metrics,
return_true=return_true,
plots=plots,
**kwargs)
def predict_on_training_data(
self,
data,
process_results=True,
return_true=False,
metrics="minimal",
plots: Union[str, list] = None,
**kwargs
):
"""makes prediction on training data.
Parameters
----------
data :
raw, unprepared data from which training data (x,y pairs) will be generated.
process_results : bool, optional
whether to post-process the results or not
return_true : bool, optional
If true, the returned value will be tuple, first is true and second is predicted array
metrics : str, optional
the metrics to calculate during post-processing
plots : optional (default=None)
The kind of plots to draw. Only valid if ``process_results`` is True.
Following plots are available:
``residual``
``regression``
``prediction``
``errors``
``fdc``
``murphy``
``edf``
**kwargs :
any keyword argument for .predict method.
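Example
-------
A minimal sketch, assuming a model already trained on the busan_beach data:
>>> from ai4water.datasets import busan_beach
>>> prediction = model.predict_on_training_data(data=busan_beach())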
"""
x, y = self.training_data(data=data)
return self.call_predict(
x=x,
y=y,
process_results=process_results,
return_true=return_true,
metrics=metrics,
plots=plots,
prefix="training",
**kwargs
)
def predict_on_validation_data(
self,
data,
process_results=True,
return_true=False,
metrics="minimal",
plots: Union[str, list] = None,
**kwargs
):
"""makes prediction on validation data.
Parameters
----------
data :
raw, unprepared data from which validation data (x,y pairs) will be generated.
process_results : bool, optional
whether to post-process the results or not
return_true : bool, optional
If true, the returned value will be tuple, first is true and second is predicted array
metrics : str, optional
the metrics to calculate during post-processing
plots : optional (default=None)
The kind of plots to draw. Only valid if ``process_results`` is True.
Following plots are available:
``residual``
``regression``
``prediction``
``errors``
``fdc``
``murphy``
``edf``
**kwargs :
any keyword argument for .predict method.
"""
x, y = self.validation_data(data=data)
if not _find_num_examples(x):
raise DataNotFound("Validation")
return self.call_predict(
x=x,
y=y,
process_results=process_results,
return_true=return_true,
metrics=metrics,
plots=plots,
prefix="validation",
**kwargs
)
def predict_on_test_data(
self,
data,
process_results=True,
return_true=False,
metrics="minimal",
plots: Union[str, list] = None,
**kwargs
):
"""makes prediction on test data.
Parameters
----------
data :
raw, unprepared data from which test data (x,y pairs) will be generated.
process_results : bool, optional
whether to post-process the results or not
return_true : bool, optional
If true, the returned value will be tuple, first is true and second is predicted array
metrics : str, optional
the metrics to calculate during post-processing
plots : optional (default=None)
The kind of plots to draw. Only valid if ``process_results`` is True.
Following plots are available:
``residual``
``regression``
``prediction``
``errors``
``fdc``
``murphy``
``edf``
**kwargs :
any keyword argument for .predict method.
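Example
-------
A minimal sketch, assuming a model already trained on the busan_beach data:
>>> from ai4water.datasets import busan_beach
>>> prediction = model.predict_on_test_data(data=busan_beach())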
"""
if isinstance(data, _DataSet):
x, y = data.test_data()
else:
x, y = self.test_data(data=data)
if not _find_num_examples(x):
raise DataNotFound("Test")
return self.call_predict(
x=x,
y=y,
process_results=process_results,
return_true=return_true,
metrics=metrics,
plots=plots,
prefix="test",
**kwargs
)
def predict_on_all_data(
self,
data,
process_results=True,
return_true=False,
metrics="minimal",
plots: Union[str, list] = None,
**kwargs
):
"""
It makes prediction on training+validation+test data.
Parameters
----------
data :
raw, unprepared data from which x,y pairs will be generated.
process_results : bool, optional
whether to post-process the results or not
return_true : bool, optional
If true, the returned value will be tuple, first is true and second is predicted array
metrics : str, optional
the metrics to calculate during post-processing
plots : optional (default=None)
The kind of plots to draw. Only valid if ``process_results`` is True.
Following plots are available:
``residual``
``regression``
``prediction``
``errors``
``fdc``
``murphy``
``edf``
**kwargs :
any keyword argument for .predict method.
"""
x, y = self.all_data(data=data)
return self.call_predict(
x=x,
y=y,
process_results=process_results,
return_true=return_true,
metrics=metrics,
plots=plots,
prefix="all",
**kwargs
)
def call_predict(
self,
x=None,
y=None,
data=None,
process_results=True,
metrics="minimal",
return_true: bool = False,
plots=None,
prefix=None,
**kwargs
):
source = 'test'
if x is None and data is None:
data = "test"
if isinstance(data, str) and data in ['training', 'validation', 'test']:
warnings.warn(f"""
argument {data} is deprecated and will be removed in future. Please
use 'predict_on_{data}_data' method instead.""")
source = data
inputs, true_outputs, _prefix, transformation_key, user_defined_data = self._fetch_data(
source=source,
x=x,
y=y,
data=data
)
if user_defined_data:
pass
elif len(inputs) == 0 or (isinstance(inputs, list) and len(inputs[0]) == 0) and source == "test":
warnings.warn("No test data found. using validation data instead",
UserWarning)
data = "validation"
source = data
inputs, true_outputs, _prefix, transformation_key, user_defined_data = self._fetch_data(
source=source,
x=x,
y=y,
data=data)
# if we still have no data, then we use training data instead
if len(inputs)==0 or (isinstance(inputs, list) and len(inputs[0]) == 0):
warnings.warn("""
No test and validation data found. using training data instead""",
UserWarning)
data = "training"
source = data
inputs, true_outputs, _prefix, transformation_key, user_defined_data = self._fetch_data(
source=source,
x=x,
y=y,
data=data)
if not getattr(self, 'is_fitted', True):
# prediction without fitting
AttribtueSetter(self, y)
prefix = prefix or _prefix
inputs = self._transform_x(inputs)
if true_outputs is not None:
true_outputs = self._transform_y(true_outputs)
if self.category == 'DL':
# some arguments specifically for DL models
if 'verbose' not in kwargs:
kwargs['verbose'] = self.verbosity
if 'batch_size' in kwargs: # if given by user
... #self.config['batch_size'] = kwargs['batch_size'] # update config
elif K.BACKEND == "tensorflow":
if isinstance(inputs, tf.data.Dataset):
...
else: # otherwise use from config
kwargs['batch_size'] = self.config['batch_size']
predicted = self.predict_fn(x=inputs, **kwargs)
else:
if self._model.__class__.__name__.startswith("XGB") and isinstance(inputs, np.ndarray):
# since we have changed feature_names of the booster, skip feature validation
kwargs['validate_features'] = False
predicted = self.predict_ml_models(inputs, **kwargs)
true_outputs, predicted = self._inverse_transform_y(
true_outputs,
predicted)
if true_outputs is None:
if return_true:
return true_outputs, predicted
return predicted
if isinstance(true_outputs, dict):
dt_index = np.arange(set([len(v) for v in true_outputs.values()]).pop())
else:
dt_index = np.arange(len(true_outputs)) # dummy/default index when data is user defined
if not user_defined_data:
dt_index = self.dh_.indexes[transformation_key]
#true_outputs, dt_index = self.dh_.deindexify(true_outputs, key=transformation_key)
if isinstance(true_outputs, np.ndarray) and true_outputs.dtype.name == 'object':
true_outputs = true_outputs.astype(predicted.dtype)
if true_outputs is None:
process_results = False
if process_results:
# initialize post-processes
pp = ProcessPredictions(
mode=self.mode,
path=self.path,
forecast_len=self.forecast_len,
output_features=self.output_features,
plots=plots,
show=bool(self.verbosity),
)
pp(true_outputs, predicted, metrics, prefix, dt_index, inputs, model=self)
if return_true:
return true_outputs, predicted
return predicted
def predict_ml_models(self, inputs, **kwargs):
"""So that it can be overwritten easily for ML models."""
return self.predict_fn(inputs, **kwargs)
def plot_model(self, nn_model, show=False, figsize=None, **kwargs) -> None:
if int(tf.__version__.split('.')[1]) > 14 and 'dpi' not in kwargs:
kwargs['dpi'] = 300
if 'to_file' not in kwargs:
kwargs['to_file'] = os.path.join(self.path, "model.png")
try:
keras.utils.plot_model(
nn_model,
show_shapes=True,
**kwargs)
drawn = True
except (AssertionError, ImportError) as e:
print(f"dot plot of model could not be plotted due to {e}")
drawn = False
if drawn and show:
import matplotlib.image as mpimg
from easy_mpl import imshow
img = mpimg.imread(os.path.join(self.path, "model.png"))
kwargs = {}
if figsize:
kwargs['figsize'] = figsize
ax,_ = imshow(img, show=False, xticklabels=[], yticklabels=[], **kwargs)
ax.axis('off')
plt.tight_layout()
plt.show()
return
def get_opt_args(self) -> dict:
"""get input arguments for an optimizer.
It is being explicitly defined here so that it can be overwritten
in sub-classes
"""
kwargs = {'lr': self.config['lr']}
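# for tensorflow 2.5 and above, pass 'learning_rate' instead of 'lr'; the first
# two version components (e.g. "2.5") are padded to a 3-digit integer (250) for the comparison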
if self.config['backend'] == 'tensorflow' and int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) >= 250:
kwargs['learning_rate'] = kwargs.pop('lr')
if self.config['backend'] == 'pytorch':
kwargs.update({'params': self.parameters()}) # parameters from pytorch model
return kwargs
def get_metrics(self) -> list:
"""Returns the performance metrics to be monitored."""
_metrics = self.config['monitor']
metrics = None
if _metrics is not None:
if not isinstance(_metrics, list):
assert isinstance(_metrics, str)
_metrics = [_metrics]
from ai4water.utils.tf_losses import nse, kge, pbias, tf_r2
metrics_with_names = {'nse': nse,
'kge': kge,
"r2": tf_r2,
'pbias': pbias}
metrics = []
for m in _metrics:
if m in metrics_with_names.keys():
metrics.append(metrics_with_names[m])
else:
metrics.append(m)
return metrics
def view(
self,
layer_name: Union[list, str] = None,
data = None,
data_type: str = 'training',
x=None,
y=None,
examples_to_view=None,
show=False
):
"""shows all activations, weights and gradients of the model.
Arguments:
layer_name:
the layer to view. If not given, all the layers will be viewed.
This argument is only required when the model consists of layers of neural
networks.
data :
the data to use when making calls to model for activation calculation
or for gradient calculation.
data_type : str
It can either ``training``, ``validation`` or ``test`` or ``all``.
x:
input, alternative to data. If given it will override `data` argument.
y:
target/observed/label, alternative to data. If given it will
override `data` argument.
examples_to_view :
the examples to view.
show:
whether to show the plot or not!
Returns:
An instance of the :py:class:`ai4water.postprocessing.visualize.Visualize` class.
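Example:
A minimal sketch (hypothetical; assumes a trained neural-network based model):
>>> from ai4water.datasets import busan_beach
>>> model.view(data=busan_beach(), data_type="training")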
"""
from ai4water.postprocessing.visualize import Visualize
visualizer = Visualize(model=self, show=show)
visualizer(layer_name,
data=data,
data_type=data_type,
x=x,
y=y,
examples_to_use=examples_to_view
)
return visualizer
def interpret(
self,
**kwargs
):
"""
Interprets the underlying model. Call it after training.
Returns:
An instance of :py:class:`ai4water.postprocessing.interpret.Interpret` class
Example:
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> model = Model(model=...)
>>> model.fit(data=busan_beach())
>>> model.interpret()
"""
# importing earlier will try to import np types as well again
from ai4water.postprocessing import Interpret
return Interpret(self)
def explain(self, *args, **kwargs):
"""Calls the :py:func:ai4water.postprocessing.explain.explain_model` function
to explain the model.
Example
-------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model="RandomForestRegressor")
>>> model.fit(data=data)
>>> model.explain(total_data=data, examples_to_explain=2)
"""
from ai4water.postprocessing.explain import explain_model
return explain_model(self, *args, **kwargs)
def _save_indices(self):
# saves the training and test indices to a json file
indices = {}
if hasattr(self, 'dh_'):
for idx in ['train_indices', 'test_indices']:
if hasattr(self.dh_, idx):
idx_values = getattr(self.dh_, idx)
if idx_values is not None and not isinstance(idx_values, str):
idx_values = np.array(idx_values, dtype=int).tolist()
else:
idx_values = None
indices[idx] = idx_values
dict_to_file(indices=indices, path=self.path)
return
def save_config(self, history: dict = None):
"""saves the current state of model in a json file.
By current state, we mean, train and test indices (if available),
hyperparameters of related to model and data and current performance
statistics. All the data is stored in model.path.
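A minimal sketch (hypothetical): when called explicitly after training,
it returns the saved configuration dictionary.
>>> config = model.save_config()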
"""
self._save_indices()
config = dict()
if history is not None:
config['min_loss'] = None
config['min_val_loss'] = None
min_loss_array = history.get('min_loss_array', None)
val_loss_array = history.get('val_loss', None)
if val_loss_array is not None and not all(np.isnan(val_loss_array)):
config['min_val_loss'] = np.nanmin(val_loss_array)
if min_loss_array is not None and not all(np.isnan(min_loss_array)):
config['min_loss'] = np.nanmin(min_loss_array)
config['config'] = self.config.copy()
# it is calculated during run time
config['config']['val_metric'] = self.val_metric
config['method'] = self.method
# we don't want our saved config to have 'path' key in it
if 'path' in config['config']:
config['config'].pop('path')
if self.category == "DL":
config['loss'] = self.loss_name()
if self.category == "ML" and self.is_custom_model:
config['config']['model'] = self.model_name
# the following parameters are not set during build and are not in "config"
# (which builds the model) because they are set at run time from the data.
# Putting them in the meta config is necessary because when we build the model
# from config and want to make some plots such as the ROC curve,
# we will need the classes_ attribute.
for attr in ['classes_', 'num_classes_', 'is_binary_',
'is_multiclass_', 'is_multilabel_']:
config[attr] = getattr(self, attr, None)
dict_to_file(config=config, path=self.path)
return config
@classmethod
def from_config(
cls,
config: dict,
make_new_path: bool = False,
**kwargs
)->"BaseModel":
"""Loads the model from config dictionary i.e. model.config
Arguments
---------
config: dict
dictionary containing model's parameters i.e. model.config
make_new_path : bool, optional
whether to make new path or not?
**kwargs:
any additional keyword arguments to Model class.
Returns
-------
an instance of :py:class:`ai4water.Model`
Example
-------
>>> from ai4water import Model
>>> import numpy as np
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> old_model = Model(model="XGBRegressor")
>>> old_model.fit(data=data)
... # now construct a new model instance from config dictionary
>>> model = Model.from_config(old_model.config)
>>> model.update_weights()
>>> x = np.random.random((100, 14))
>>> prediction = model.predict(x=x)
"""
return cls._get_config_and_path(
cls,
config=config,
make_new_path=make_new_path,
**kwargs)
@classmethod
def from_config_file(
cls,
config_path: str,
make_new_path: bool = False,
**kwargs) -> "BaseModel":
"""
Loads the model from a config file.
Arguments
----------
config_path :
complete path of config file
make_new_path : bool, optional
If True, it means we want to use the config
file only to build the model, and a new path will be made. We
would not normally update the weights in such a case.
**kwargs :
any additional keyword arguments for the :py:class:`ai4water.Model`
Return
------
an instance of :py:class:`ai4water.Model` class
Example
-------
>>> from ai4water import Model
>>> import numpy as np
>>> config_file_path = "../file/to/config.json"
>>> model = Model.from_config_file(config_file_path)
>>> x = np.random.random((100, 14))
>>> prediction = model.predict(x=x)
"""
# when an instance of Model is created, config is written, which
# will overwrite these attributes so we need to keep track of them
# so that after building the model, we can set these attributes to Model
attrs = {}
with open(config_path, 'r') as fp:
config = json.load(fp)
for attr in ['classes_', 'num_classes_', 'is_binary_', 'is_multiclass_', 'is_multilabel_']:
if attr in config:
attrs[attr] = config[attr]
model = cls._get_config_and_path(
cls,
config_path=config_path,
make_new_path=make_new_path,
**kwargs
)
for attr in ['classes_', 'num_classes_', 'is_binary_', 'is_multiclass_', 'is_multilabel_']:
if attr in attrs:
setattr(model, attr, attrs[attr])
# now we need to save the config again
model.save_config()
return model
@staticmethod
def _get_config_and_path(
cls,
config_path: str = None,
config=None,
make_new_path=False,
**kwargs
)->"BaseModel":
"""Sets some attributes of the cls so that it can be built from config.
Also fetches config and path which are used to initiate cls."""
if config is not None and config_path is not None:
raise ValueError
if config is None:
assert config_path is not None
with open(config_path, 'r') as fp:
meta_config = json.load(fp)
config = meta_config['config']
path = os.path.dirname(config_path)
else:
assert isinstance(config, dict), f"""
config must be dictionary but it is of type {config.__class__.__name__}"""
path = config['path']
# todo
# shouldn't we remove 'path' from Model's init? we just need prefix
# path is needed in class methods only?
if 'path' in config:
config.pop('path')
if make_new_path:
allow_weight_loading = False
path = None
else:
allow_weight_loading = True
model = cls(**config, path=path, **kwargs)
model.allow_weight_loading = allow_weight_loading
model.from_check_point = True
return model
def update_weights(self, weight_file: str = None):
"""
Updates the weights of the underlying model.
Parameters
----------
weight_file : str, optional
complete path of weight file. If not given, the
weights are updated from model.w_path directory. For neural
network based models, the best weights are updated if more
than one weight file is present in model.w_path.
Returns
-------
None
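Example
-------
A minimal sketch (hypothetical; assumes ``config_path`` points to the
config.json file of a previously trained model):
>>> from ai4water import Model
>>> model = Model.from_config_file(config_path)
>>> model.update_weights()  # loads the best weights from model.w_path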
"""
if weight_file is None:
weight_file = find_best_weight(self.w_path)
weight_file_path = os.path.join(self.w_path, weight_file)
else:
if not os.path.isfile(weight_file):
raise ValueError(f'weight_file must be complete path of weight file but it is {weight_file}')
weight_file_path = weight_file
weight_file = os.path.basename(weight_file) # for printing
if not self.allow_weight_loading:
raise ValueError(f"Weights loading not allowed because allow_weight_loading is {self.allow_weight_loading}"
f"and model path is {self.path}")
if self.category == "ML":
import joblib # some modules don't have joblib in their requires
self._model = joblib.load(weight_file_path)
else:
# loads the weights of keras model from weight file `w_file`.
if self.api == 'functional' and self.config['backend'] == 'tensorflow':
self._model.load_weights(weight_file_path)
elif self.config['backend'] == 'pytorch':
self.load_state_dict(torch.load(weight_file_path))
else:
self.load_weights(weight_file_path)
if self.verbosity > 0:
print("{} Successfully loaded weights from {} file {}".format('*' * 10, weight_file, '*' * 10))
return
def eda(self, data, freq: str = None):
"""Performs comprehensive Exploratory Data Analysis.
Parameters
----------
data :
freq :
if specified, small chunks of data will be plotted instead of the
whole data at once. The data will NOT be resampled. This is valid
only for `plot_data` and `box_plot`. Possible values are `yearly`,
`weekly`, and `monthly`.
Returns
-------
an instance of EDA :py:class:`ai4water.eda.EDA` class
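Example
-------
A minimal sketch, assuming the busan_beach dataset:
>>> from ai4water.datasets import busan_beach
>>> eda = model.eda(data=busan_beach())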
"""
# importing EDA earlier will import numpy etc as well
from ai4water.eda import EDA
# todo, Uniform Manifold Approximation and Projection (UMAP) of input data
# todo, radial heatmap to show temporal trends http://holoviews.org/reference/elements/bokeh/RadialHeatMap.html
eda = EDA(data=data,
path=self.path,
in_cols=self.input_features,
out_cols=self.output_features,
save=True)
eda()
return eda
def update_info(self):
self.info["version_info"] =get_version_info()
return
def print_info(self):
class_type = ''
if isinstance(self.config['model'], dict):
if 'layers' in self.config['model']:
model_name = self.__class__.__name__
else:
model_name = list(self.config['model'].keys())[0]
else:
if isinstance(self.config['model'], str):
model_name = self.config['model']
else:
model_name = self.config['model'].__class__.__name__
if self.verbosity > 0:
print(f"""
building {self.category} model for {class_type}
{self.mode} problem using {model_name}""")
return
def get_optimizer(self):
opt_args = self.get_opt_args()
optimizer = OPTIMIZERS[self.config['optimizer']](**opt_args)
return optimizer
def optimize_hyperparameters(
self,
data: Union[tuple, list, pd.DataFrame, np.ndarray],
algorithm: str = "bayes",
num_iterations: int = 14,
process_results: bool = True,
refit: bool = True,
**kwargs
):
"""
optimizes the hyperparameters of the built model
The parameters that need to be optimized must be given as space.
Arguments:
data :
It can be one of following
- raw unprepared data in the form of a numpy array or pandas dataframe
- a tuple of x,y pairs
If it is unprepared data, it is passed to :py:class:`ai4water.preprocessing.DataSet`.
which prepares x,y pairs from it. The ``DataSet`` class also
splits the data into training, validation and tests sets. If it
is a tuple of x,y pairs, it is split into training and validation.
In both cases, the loss on validation set is used as objective function.
The loss calculated using ``val_metric``.
algorithm: str, optional (default="bayes")
the algorithm to use for optimization
num_iterations: int, optional (default=14)
number of iterations for optimization.
process_results: bool, optional (default=True)
whether to perform postprocessing of optimization results or not
refit: bool, optional (default=True)
whether to retrain the model using both training and validation data
Returns:
an instance of :py:class:`ai4water.hyperopt.HyperOpt` which is used for optimization
Examples:
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.hyperopt import Integer, Categorical, Real
>>> model_config = {"XGBRegressor": {"n_estimators": Integer(low=10, high=20),
>>> "max_depth": Categorical([10, 20, 30]),
>>> "learning_rate": Real(0.00001, 0.1)}}
>>> model = Model(model=model_config)
>>> optimizer = model.optimize_hyperparameters(data=busan_beach())
Same can be done if a model is defined using neural networks
... lookback = 14
>>> model_config = {"layers": {
... "Input": {"input_shape": (lookback, 13)},
... "LSTM": {"config": {"units": Integer(32, 64), "activation": "relu"}},
... "Dense": {"units": 1,
... "activation": Categorical(["relu", "tanh"], name="dense1_act")}}}
>>> model = Model(model=model_config, ts_args={"lookback": lookback})
>>> optimizer = model.optimize_hyperparameters(data=busan_beach(),
... refit=False)
"""
from ._optimize import OptimizeHyperparameters # optimize_hyperparameters
if isinstance(data, list) or isinstance(data, tuple):
pass
else:
setattr(self, 'dh_', DataSet(data, **self.data_config))
_optimizer = OptimizeHyperparameters(
self,
list(self.opt_paras.values()),
algorithm=algorithm,
num_iterations=num_iterations,
process_results=process_results,
**kwargs
).fit(data=data)
algo_type = list(self.config['model'].keys())[0]
new_model_config = update_model_config(self._original_model_config['model'],
_optimizer.best_paras())
self.config['model'][algo_type] = new_model_config
new_other_config = update_model_config(self._original_model_config['other'],
_optimizer.best_paras())
self.config.update(new_other_config)
# if ts_args are optimized, update them as well
for k, v in _optimizer.best_paras().items():
if k in self.config['ts_args']:
self.config['ts_args'][k] = v
if refit:
# for ml models, we must build them again
# TODO, why not for DL models
if self.category == "ML":
self.build_ml_model()
if isinstance(data, (list, tuple)):
assert len(data)==2
x, y = data
self.fit_on_all_training_data(x=x, y=y)
else:
self.fit_on_all_training_data(data=data)
else:
raise NotImplementedError(f"""
Setting `refit` is not supported for neural network
based models. Please call optimize_hyperparameters(..., refit=False)
""")
return _optimizer
def optimize_transformations(
self,
data:Union[np.ndarray, pd.DataFrame],
transformations: Union[list, str] = None,
include: Union[str, list, dict] = None,
exclude: Union[str, list] = None,
append: dict = None,
y_transformations: Union[list, dict] = None,
algorithm: str = "bayes",
num_iterations: int = 12,
process_results: bool = True,
update_config: bool = True
):
"""optimizes the transformations for the input/output features
The 'val_score' parameter given as input to the Model is used as objective
function for optimization problem.
Arguments:
data :
It can be one of following
- raw unprepared data in the form of a numpy array or pandas dataframe
- a tuple of x,y pairs
If it is unprepared data, it is passed to :py:class:`ai4water.preprocessing.DataSet`.
which prepares x,y pairs from it. The ``DataSet`` class also
splits the data into training, validation and tests sets. If it
is a tuple of x,y pairs, it is split into training and validation.
In both cases, the loss on validation set is used as objective function.
The loss calculated using ``val_metric``.
transformations :
the transformations to consider for input features. By default,
following transformations are considered for input features
- ``minmax`` rescale from 0 to 1
- ``center`` center the data by subtracting mean from it
- ``scale`` scale the data by dividing it with its standard deviation
- ``zscore`` first performs centering and then scaling
- ``box-cox``
- ``yeo-johnson``
- ``quantile``
- ``robust``
- ``log``
- ``log2``
- ``log10``
- ``sqrt`` square root
include : list, dict, str, optional
the name/names of input features to include. If you don't want
to include any feature. Set this to an empty list
exclude: the name/names of input features to exclude
append:
the input features with custom candidate transformations. For example
if we want to try only `minmax` and `zscore` on feature `tide_cm`, then
it can be done as following
>>> append={"tide_cm": ["minmax", "zscore"]}
y_transformations:
It can either be a list of transformations to be considered for
output features for example
>>> y_transformations = ['log', 'log10', 'log2', 'sqrt']
would mean that consider `log`, `log10`, `log2` and `sqrt` are
to be considered for output transformations during optimization.
It can also be a dictionary whose keys are names of output features
and whose values are lists of transformations to be considered for output
features. For example
>>> y_transformations = {'output1': ['log2', 'log10'], 'output2': ['log', 'sqrt']}
Default is None, which means do not optimize transformation for output
features.
algorithm: str
The algorithm to use for optimizing transformations
num_iterations: int
The number of iterations for the optimization algorithm.
process_results :
whether to perform postprocessing of optimization results or not
update_config: whether to update the config of model or not.
Returns:
an instance of HyperOpt :py:class:`ai4water.hyperopt.HyperOpt` class
which is used for optimization
Example:
>>> from ai4water.datasets import busan_beach
>>> from ai4water import Model
>>> model = Model(model="XGBRegressor")
>>> optimizer_ = model.optimize_transformations(data=busan_beach(), exclude="tide_cm")
>>> print(optimizer_.best_paras()) # find the best/optimized transformations
>>> model.fit(data=busan_beach())
>>> model.predict()
"""
from ._optimize import OptimizeTransformations # optimize_transformations
from .preprocessing.transformations.utils import InvalidTransformation
if isinstance(data, list) or isinstance(data, tuple):
pass
else:
setattr(self, 'dh_', DataSet(data=data, **self.data_config))
allowed_transforamtions = ["minmax", "center", "scale", "zscore", "box-cox", "yeo-johnson",
"quantile", "robust", "log", "log2", "log10", "sqrt", "none",
]
append = append or {}
categories = allowed_transforamtions
if transformations is not None:
assert isinstance(transformations, list)
for t in transformations:
if t not in allowed_transforamtions:
raise InvalidTransformation(t, allowed_transforamtions)
categories = transformations
if y_transformations:
if isinstance(y_transformations, list):
for t in y_transformations:
if t not in allowed_transforamtions:
raise InvalidTransformation(t, allowed_transforamtions)
for out in self.output_features:
append[out] = y_transformations
else:
assert isinstance(y_transformations, dict)
for out_feature, out_transformations in y_transformations.items():
assert out_feature in self.output_features
assert isinstance(out_transformations, list)
for t in out_transformations:
if t not in allowed_transforamtions:
raise InvalidTransformation(t, allowed_transforamtions)
append[out_feature] = out_transformations
optimizer = OptimizeTransformations(
self,
algorithm=algorithm,
num_iterations=num_iterations,
include=include,
exclude=exclude,
append=append,
categories=categories,
process_results=process_results,
).fit(data=data)
x_transformations = []
y_transformations = []
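# build x/y transformation configs from the optimized choices; "none" means the
# feature is left untransformed, while log/sqrt/box-cox variants additionally
# need zeros replaced and negative values treated before they can be applied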
for feature, method in optimizer.best_paras().items():
if method == "none":
pass
else:
t = {'method': method, 'features': [feature]}
if method.startswith("log"):
t["treat_negatives"] = True
t["replace_zeros"] = True
elif method == "box-cox":
t["replace_zeros"] = True
t["treat_negatives"] = True
elif method == "sqrt":
t["treat_negatives"] = True
t["replace_zeros"] = True
if feature in self.input_features:
x_transformations.append(t)
else:
y_transformations.append(t)
if update_config:
self.config['x_transformation'] = x_transformations
self.config['y_transformation'] = y_transformations or None
return optimizer
def permutation_importance(
self,
data = None,
data_type: str = "test",
x=None,
y=None,
scoring: Union[str, Callable] = "r2",
n_repeats: int = 5,
noise: Union[str, np.ndarray] = None,
use_noise_only: bool = False,
weights=None,
plot_type: str = None
):
"""Calculates the permutation importance on the given data
Parameters
----------
data :
Raw unprepared data from which x,y pairs of training and test
data are prepared.
data_type : str
one of `training`, `test` or `validation`. By default test data is
used based upon recommendations of Christoph Molnar's book_. Only
valid if ``data`` argument is given.
x:
inputs for the model. alternative to data
y:
target/observation data for the model. alternative to data
scoring:
the scoring to use to calculate importance
n_repeats:
number of times the permutation for each feature is performed.
noise:
the noise to add when a feature is permutated. It can be a 1D
array of length equal to len(data) or string defining the
distribution
use_noise_only:
If True, then the feature being perturbed is replaced by the noise
instead of adding the noise into the feature. This argument is only
valid if `noise` is not None.
weights:
plot_type:
if not None, it must be either ``heatmap`` or ``boxplot`` or ``bar_chart``
Returns
-------
an instance of :py:class:`ai4water.postprocessing.PermutationImportance`
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> model = Model(model="XGBRegressor")
>>> model.fit(data=busan_beach())
>>> perm_imp = model.permutation_importance(data=busan_beach(),
... data_type="validation", plot_type="boxplot")
>>> perm_imp.importances
.. _book:
https://christophm.github.io/interpretable-ml-book/feature-importance.html#feature-importance-data
"""
assert data_type in ("training", "validation", "test")
if x is None:
data = getattr(self, f"{data_type}_data")(data=data)
x, y = data
from .postprocessing.explain import PermutationImportance
pm = PermutationImportance(
model=self.predict,
inputs=x,
target=y,
scoring=scoring,
n_repeats=n_repeats,
noise=noise,
use_noise_only=use_noise_only,
path=os.path.join(self.path, "explain"),
feature_names=self.input_features,
weights=weights,
seed=self.config['seed'],
save=True
)
if plot_type is not None:
assert plot_type in ("boxplot", "heatmap", "bar_chart")
if plot_type == "heatmap":
pm.plot_as_heatmap()
else:
pm.plot_1d_pimp(plot_type=plot_type)
return pm
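# Illustrative sketch (standalone, hedged; not part of the class above): the core
# computation that permutation importance performs, shown with sklearn/numpy
# stand-ins rather than ai4water internals. Data, model and names are hypothetical.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = 2.0 * X[:, 0] + 0.1 * X[:, 1] + rng.normal(scale=0.1, size=200)
est = LinearRegression().fit(X, y)
base = r2_score(y, est.predict(X))
for feat in range(X.shape[1]):
    Xp = X.copy()
    Xp[:, feat] = rng.permutation(Xp[:, feat])   # permute one feature at a time
    # drop in score after permutation ~ importance of that feature
    print(feat, round(base - r2_score(y, est.predict(Xp)), 3))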
def sensitivity_analysis(
self,
data=None,
bounds=None,
sampler="morris",
analyzer:Union[str, list]="sobol",
sampler_kwds: dict = None,
analyzer_kwds: dict = None,
save_plots: bool = True,
names: List[str] = None
)->dict:
"""performs sensitivity analysis of the model w.r.t input features in data.
The model and its hyperparameters remain fixed while the input data is changed.
Parameters
----------
data :
data which will be used to get the bounds/limits of input features. If given,
it must be a 2d numpy array. Note that the given data itself is not used
during sensitivity analysis; instead, new synthetic data is prepared within
the bounds, on which the sensitivity analysis is performed.
bounds : list,
alternative to data
sampler : str, optional
any sampler_ from SALib library. For example ``morris``, ``fast_sampler``,
``ff``, ``finite_diff``, ``latin``, ``saltelli``, ``sobol_sequence``
analyzer : str/list, optional
any analyzer_ from the SALib library. For example ``sobol``, ``dgsm``, ``fast``,
``ff``, ``hdmr``, ``morris``, ``pawn``, ``rbd_fast``. You can also choose
more than one analyzer, which is useful when you want to compare their
results. Using more than one analyzer does not increase computation
time, except for the ``hdmr`` and ``delta`` analyzers, which are
computationally heavy. For example
>>> analyzer = ["morris", "sobol", "rbd_fast"]
sampler_kwds : dict
keyword arguments for sampler
analyzer_kwds : dict
keyword arguments for analyzer
save_plots : bool, optional
whether to save the analysis results and plots to the model's path.
names : list, optional
names of input features. If not given, ``self.input_features`` are used.
Returns
-------
dict :
a dictionary whose keys are names of analyzers and whose values are the
sensitivity results for that analyzer.
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> df = busan_beach()
>>> input_features=df.columns.tolist()[0:-1]
>>> output_features = df.columns.tolist()[-1:]
... # build the model
>>> model=Model(model="RandomForestRegressor",
>>> input_features=input_features,
>>> output_features=output_features)
... # train the model
>>> model.fit(data=df)
... # perform sensitivity analysis
>>> si = model.sensitivity_analysis(data=df[input_features].values,
>>> sampler="morris", analyzer=["morris", "sobol"],
>>> sampler_kwds={'N': 100})
.. _sampler:
https://salib.readthedocs.io/en/latest/api/SALib.sample.html
.. _analyzer:
https://salib.readthedocs.io/en/latest/api/SALib.analyze.html
"""
try:
import SALib
except (ImportError, ModuleNotFoundError):
warnings.warn("""
You must have SALib library installed in order to perform sensitivity analysis.
Please install it using 'pip install SALib' and make sure that it is importable
""")
return {}
from ai4water.postprocessing._sa import sensitivity_analysis, sensitivity_plots
from ai4water.postprocessing._sa import _make_predict_func
if data is not None:
if not isinstance(data, np.ndarray):
assert isinstance(data, pd.DataFrame)
data = data.values
x = data
# calculate bounds
assert isinstance(x, np.ndarray)
bounds = []
for feat in range(x.shape[1]):
bound = [np.min(x[:, feat]), np.max(x[:, feat])]
bounds.append(bound)
else:
assert bounds is not None
assert isinstance(bounds, list)
assert all([isinstance(bound, list) for bound in bounds])
analyzer_kwds = analyzer_kwds or {}
if self.lookback >1:
if self.category == "DL":
func = _make_predict_func(self, verbose=0)
else:
func = _make_predict_func(self)
else:
func = self.predict
if names is None:
names = self.input_features
results = sensitivity_analysis(
sampler,
analyzer,
func,
bounds=bounds,
sampler_kwds = sampler_kwds,
analyzer_kwds = analyzer_kwds,
names=names
)
if save_plots:
for _analyzer, result in results.items():
res_df = result.to_df()
if isinstance(res_df, list):
for idx, res in enumerate(res_df):
fname = os.path.join(self.path, f"{_analyzer}_{idx}_results.csv")
res.to_csv(fname)
else:
res_df.to_csv(os.path.join(self.path, f"{_analyzer}_results.csv"))
sensitivity_plots(_analyzer, result, self.path)
return results
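# Illustrative sketch (standalone, hedged; not part of the class above): the SALib
# sample -> evaluate -> analyze workflow that sensitivity_analysis wraps. The model
# function, bounds and names below are hypothetical stand-ins, and exact SALib entry
# points may differ slightly between SALib versions.
import numpy as np
from SALib.sample import saltelli
from SALib.analyze import sobol

problem = {"num_vars": 2, "names": ["x1", "x2"], "bounds": [[0.0, 1.0], [0.0, 1.0]]}
X = saltelli.sample(problem, 128)   # synthetic inputs drawn within the bounds
Y = (X ** 2).sum(axis=1)            # stand-in for model predictions on the samples
res = sobol.analyze(problem, Y)
print(res["S1"])                    # first-order sensitivity indices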
def shap_values(
self,
data,
layer=None
)->np.ndarray:
"""
returns shap values
Parameters
----------
data :
raw unprepared data from which training and test data are extracted.
layer :
Returns
-------
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model="RandomForestRegressor")
>>> model.fit(data=data)
>>> model.shap_values(data=data)
"""
from .postprocessing.explain import explain_model_with_shap
explainer = explain_model_with_shap(
self,
total_data=data,
layer=layer,
)
return explainer.shap_values
def explain_example(
self,
data,
example_num:int,
method="shap"
):
"""explains a single exmaple either using shap or lime
Parameters
----------
data :
the data to use
example_num :
the example/sample number/index to explain
method :
either ``shap`` or ``lime``
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model="RandomForestRegressor")
>>> model.fit(data=data)
>>> model.explain_example(data=data, example_num=2)
"""
assert method in ("shap", "lime")
if method == "shap":
from .postprocessing.explain import explain_model_with_shap
explainer = explain_model_with_shap(
self,
total_data=data,
examples_to_explain=example_num)
else:
from .postprocessing.explain import explain_model_with_lime
explainer = explain_model_with_lime(
self,
total_data=data,
examples_to_explain=example_num)
return explainer
def partial_dependence_plot(
self,
x=None,
data=None,
data_type="all",
feature_name=None,
num_points:int=100,
show:bool = True,
):
"""Shows partial depedence plot for a feature.
Parameters
----------
x :
the input data to use. If not given, then ``data`` must be given.
data :
raw unprepared data from which x,y pairs are to be made. If
given, ``x`` must not be given.
data_type : str
the kind of the data to be used. It is only valid when
``data`` is given.
feature_name : str/list
name/names of features. If only one feature is given, 1 dimensional
partial dependence plot is plotted. You can also provide a list of
two feature names, in which case 2d interaction plot will be plotted.
num_points : int
number of points used to define the grid.
show : bool
whether to show the plot or not.
Returns
-------
an instance of :py:class:`ai4water.postprocessing.PartialDependencePlot`
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = Model(model="RandomForestRegressor")
>>> model.fit(data=data)
>>> model.partial_dependence_plot(x=data.iloc[:, 0:-1], feature_name="tide_cm")
...
>>> model.partial_dependence_plot(data=data, feature_name="tide_cm")
"""
if x is None:
assert data is not None, "either x or data must be given"
x, _ = getattr(self, f"{data_type}_data")(data=data)
from .postprocessing.explain import PartialDependencePlot
pdp = PartialDependencePlot(
self.predict,
data=x,
feature_names=self.input_features,
num_points=num_points,
show=show
)
if isinstance(feature_name, str):
pdp.plot_1d(feature=feature_name)
else:
assert isinstance(feature_name, list)
assert len(feature_name) == 2
pdp.plot_interaction(features=feature_name)
return pdp
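# Illustrative sketch (standalone, hedged; not part of the class above): the 1d
# partial dependence computation that such a plot visualises, using a hypothetical
# sklearn model and data instead of ai4water internals.
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
X = rng.uniform(size=(300, 3))
y = np.sin(3 * X[:, 0]) + X[:, 1]
est = RandomForestRegressor(n_estimators=50, random_state=0).fit(X, y)

grid = np.linspace(X[:, 0].min(), X[:, 0].max(), 20)  # `num_points` grid for feature 0
pdp = []
for g in grid:
    Xg = X.copy()
    Xg[:, 0] = g                        # hold the feature of interest at the grid value
    pdp.append(est.predict(Xg).mean())  # average prediction over the data distribution
print(np.round(pdp, 2))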
def prediction_analysis(
self,
features:Union[list, str],
x:Union[np.ndarray, pd.DataFrame]=None,
y:np.ndarray=None,
data = None,
data_type:str = "all",
feature_names:Union[list, str]=None,
num_grid_points:int = None,
grid_types = "percentile",
percentile_ranges = None,
grid_ranges = None,
custom_grid:list = None,
show_percentile: bool = False,
show_outliers: bool = False,
end_point:bool = True,
which_classes=None,
ncols=2,
figsize: tuple = None,
annotate:bool = True,
annotate_kws:dict = None,
cmap="YlGn",
border=False,
show:bool=True,
save_metadata:bool=True
)->plt.Axes:
"""shows prediction distribution with respect to two input features.
Parameters
----------
x :
input data to the model.
y :
true data corresponding to ``x``.
data :
raw unprepared data from which x,y pairs for training,validation and test
are generated. It must only be given if ``x`` is not given.
data_type : str, optional (default="test")
The kind of data to be used. It is only valid if ``data`` argument is used.
It should be one of ``training``, ``validation``, ``test`` or ``all``.
features: str/list
name or names of features to investigate
feature_names: list
feature names
num_grid_points: list, optional, default=None
number of grid points for each feature
grid_types: list, optional, default=None
type of grid points for each feature
percentile_ranges: list of tuple, optional, default=None
percentile range to investigate for each feature
grid_ranges: list of tuple, optional, default=None
value range to investigate for each feature
custom_grid: list of (Series, 1d-array, list), optional, default=None
customized list of grid points for each feature
show_percentile: bool, optional, default=False
whether to display the percentile buckets for both features
show_outliers: bool, optional, default=False
whether to display the out of range buckets for both features
end_point: bool, optional
If True, stop is the last grid point, default=True
Otherwise, it is not included
which_classes: list, optional, default=None
which classes to plot, only use when it is a multi-class problem
figsize: tuple or None, optional, default=None
size of the figure, (width, height)
ncols: integer, optional, default=2
number subplot columns, used when it is multi-class problem
annotate: bool, default=True
whether to annotate the points
annotate_kws : dict, optional
a dictionary of keyword arguments with following keys
annotate_counts : bool, default=False
whether to annotate counts or not.
annotate_colors : tuple
pair of colors
annotate_color_threshold : float
threshold value for annotation
annotate_fmt : str
format string for annotation.
annotate_fontsize : int, optional (default=7)
fontsize for annotation
cmap :
border :
show : bool, optional (default=True)
whether to show the plot or not
save_metadata : bool, optional, default=True
whether to save the information as csv or not
Returns
-------
plt.Axes
the matplotlib Axes on which the plot is drawn; the summary dataframe is
saved as csv when ``save_metadata`` is True.
Examples
--------
>>> from ai4water.datasets import busan_beach
>>> from ai4water import Model
...
>>> model = Model(model="XGBRegressor")
>>> model.fit(data=busan_beach())
>>> model.prediction_analysis(features="tide_cm",
... data=busan_beach(), show_percentile=True)
... # for multiple features
>>> model.prediction_analysis(
... ['tide_cm', 'sal_psu'],
... data=busan_beach(),
... annotate_kws={"annotate_counts":True,
... "annotate_colors":("black", "black"),
... "annotate_fontsize":10},
... custom_grid=[[-41.4, -20.0, 0.0, 20.0, 42.0],
... [33.45, 33.7, 33.9, 34.05, 34.4]],
... )
"""
if x is None:
assert data is not None
x, _ = getattr(self, f"{data_type}_data")(data=data)
y = self.predict(x)
if isinstance(x, np.ndarray):
x = pd.DataFrame(x, columns=self.input_features)
if feature_names is None:
if isinstance(features, str):
feature_names = features
else:
assert isinstance(features, list)
feature_names = "Feature"
if not isinstance(features, list):
features = [features]
_annotate_kws = {
'annotate_counts': True,
'annotate_colors':("black", "white"),
'annotate_color_threshold': None,
'annotate_fmt': None,
'annotate_fontsize': 7
}
if annotate_kws is None:
annotate_kws = dict()
_annotate_kws.update(annotate_kws)
if len(features) == 1:
ax, summary_df = prediction_distribution_plot(
self.mode,
inputs=x,
prediction=y,
feature=features[0],
feature_name=feature_names[0],
n_classes=self.num_classes_,
num_grid_points=num_grid_points or 10,
grid_type=grid_types,
percentile_range=percentile_ranges,
grid_range=grid_ranges,
cust_grid_points=custom_grid,
show_percentile=show_percentile,
show_outliers=show_outliers,
end_point=end_point,
figsize=figsize,
ncols=ncols,
show=False,
)
else:
ax, summary_df = feature_interaction(
self.predict,
x,
features=features,
feature_names=feature_names,
num_grid_points=num_grid_points,
grid_types=grid_types,
percentile_ranges=percentile_ranges,
grid_ranges=grid_ranges,
cust_grid_points=custom_grid,
show_percentile=show_percentile,
show_outliers=show_outliers,
end_point=end_point,
which_classes=which_classes,
ncols=ncols,
figsize=figsize,
annotate=annotate,
cmap=cmap,
border=border,
**_annotate_kws,
)
fname = f"prediction_analysis_{features[0] if len(features)==1 else features[0]+features[1]}"
if save_metadata:
summary_df.to_csv(os.path.join(self.path, f"{fname}.csv"))
if show:
plt.show()
return ax
def _transform(self, data, name_in_config):
"""transforms the data using the transformer which has already been fit"""
if name_in_config not in self.config:
raise NotImplementedError("""You have not trained the model using .fit.
Making predictions or evaluating the model without training
is not allowed when transformations are applied, because the
transformation parameters are calculated from the training data.
Either train the model first using the .fit() method or remove the
x_transformation/y_transformation arguments.""")
transformer = Transformations.from_config(self.config[name_in_config])
return transformer.transform(data=data)
def _transform_x(self, x):
"""transforms the x data using the transformer which has already been fit"""
if self.config['x_transformation']:
return self._transform(x, 'x_transformer_')
return x
def _transform_y(self, y):
"""transforms the y according the transformer which has already been fit."""
if self.config['y_transformation']:
return self._transform(y, 'y_transformer_')
return y
def _fit_transform_x(self, x):
"""fits and transforms x and puts the transformer in config witht he key
'x_transformer_'"""
return self._fit_transform(x,
'x_transformer_',
self.config['x_transformation'],
self.input_features)
def _fit_transform_y(self, y):
"""fits and transforms y and puts the transformer in config witht he key
'y_transformer_'"""
return self._fit_transform(y,
'y_transformer_',
self.config['y_transformation'],
self.output_features)
def _fit_transform(self, data, key, transformation, feature_names):
"""fits and transforms the `data` using `transformation` and puts it in config
with config `name`."""
if data is not None and transformation:
transformer = Transformations(feature_names, transformation)
if isinstance(data, pd.DataFrame):
data = data.values
data = transformer.fit_transform(data)
self.config[key] = transformer.config()
return data
def _inverse_transform_y(self, true_outputs, predicted):
"""inverse transformation of y/labels for both true and predicted"""
if self.config['y_transformation']:
y_transformer = Transformations.from_config(self.config['y_transformer_'])
if self.config['y_transformation']: # only if we apply transformation on y
# both x,and true_y were given
true_outputs = self.__inverse_transform_y(true_outputs, y_transformer)
# because observed y may have -ves or zeros which would have been
# removed during fit and are put back into it during inverse transform, so
# in such case predicted y should not be treated by zero indices or negative indices
# of true y. In other words, parameters of true y should not have impact on inverse
# transformation of predicted y.
predicted = self.__inverse_transform_y(predicted, y_transformer, postprocess=False)
return true_outputs, predicted
def __inverse_transform_y(self,
y,
transformer,
method="inverse_transform",
postprocess=True
)->np.ndarray:
"""inverse transforms y where y is supposed to be true or predicted output
from model."""
# todo, if train_y had zeros or negatives then this will be wrong
if isinstance(y, np.ndarray):
# it is ndarray, either num_outs>1 or quantiles>1 or forecast_len>1 some combination of them
if y.size > len(y):
if y.ndim == 2:
for out in range(y.shape[1]):
y[:, out] = getattr(transformer, method)(y[:, out],
postprocess=postprocess).reshape(-1, )
else:
# (exs, outs, quantiles) or (exs, outs, forecast_len) or (exs, forecast_len, quantiles)
for out in range(y.shape[1]):
for q in range(y.shape[2]):
y[:, out, q] = getattr(transformer, method)(y[:, out, q],
postprocess=postprocess).reshape(-1, )
else: # 1d array
y = getattr(transformer, method)(y, postprocess=postprocess)
# y can be None for example when we call model.predict(x=x),
# in this case we don't know what is y
elif y is not None:
raise ValueError(f"can't inverse transform y of type {type(y)}")
return y
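# Illustrative sketch (standalone, hedged; not part of the class above): the
# fit_transform / inverse_transform round trip that the helpers above manage,
# with sklearn's MinMaxScaler as a stand-in for ai4water's Transformations.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

y = np.array([[1.0], [5.0], [10.0]])
scaler = MinMaxScaler().fit(y)                 # parameters learned on training targets
y_scaled = scaler.transform(y)                 # what the model sees during training
y_back = scaler.inverse_transform(y_scaled)    # predictions are mapped back like this
assert np.allclose(y, y_back)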
def training_data(self, x=None, y=None, data='training', key='train')->tuple:
"""
returns the x,y pairs for training. x,y are not used but
only given to be used if user overwrites this method for further processing
of x, y as shown below.
>>> from ai4water import Model
>>> class MyModel(Model):
>>> def training_data(self, *args, **kwargs) ->tuple:
>>> train_x, train_y = super().training_data(*args, **kwargs)
... # further process x, y
>>> return train_x, train_y
"""
return self.__fetch('training', data,key)
def validation_data(self, x=None, y=None, data='validation', key="val")->tuple:
"""
returns the x,y pairs for validation. x,y are not used but
only given to be used if user overwrites this method for further processing
of x, y as shown below.
>>> from ai4water import Model
>>> class MyModel(Model):
>>> def validation_data(self, *args, **kwargs) ->tuple:
>>> val_x, val_y = super().validation_data(*args, **kwargs)
... # further process x, y
>>> return val_x, val_y
"""
return self.__fetch('validation', data, key)
def test_data(self, x=None, y=None, data='test', key="test")->tuple:
"""
returns the x,y pairs for test. x,y are not used but
only given to be used if user overwrites this method for further processing
of x, y as shown below.
>>> from ai4water import Model
>>> class MyModel(Model):
>>> def test_data(self, *args, **kwargs) ->tuple:
>>> test_x, test_y = super().test_data(*args, **kwargs)
... # further process x, y
>>> return test_x, test_y
"""
return self.__fetch('test', data, key)
def all_data(self, x=None, y=None, data=None)->tuple:
"""it returns all data i.e. training+validation+test
after extracting them ``data``.
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> model = Model(model="XGBRegressor")
>>> train_x, train_y = model.training_data(data=data)
>>> print(train_x.shape, train_y.shape)
>>> val_x, val_y = model.validation_data(data=data)
>>> print(val_x.shape, val_y.shape)
... # all_data will contain training, validation and test data
>>> all_x, all_y = model.all_data(data=data)
>>> print(all_x.shape, all_y.shape)
"""
train_x, train_y = self.training_data(x=x, y=y, data=data)
val_x, val_y = self.validation_data(x=x, y=y, data=data)
test_x, test_y = self.test_data(x=x, y=y, data=data)
x = []
y = []
if isinstance(train_x, list):
for val in range(len(train_x)):
# if val data is not available
if hasattr(val_x[val], '__len__') and len(val_x[val])==0:
x_val = np.concatenate([train_x[val], test_x[val]])
# if test data is not available
elif hasattr(test_x[val], '__len__') and len(test_x[val])==0:
x_val = np.concatenate([train_x[val], val_x[val]])
# supposing all three data are available
else:
x_val = np.concatenate([train_x[val], val_x[val], test_x[val]])
x.append(x_val)
else:
for _x in [train_x, val_x, test_x]:
if _x is not None and (hasattr(_x, '__len__') and len(_x)>0):
x.append(_x)
x = np.concatenate(x)
for _y in [train_y, val_y, test_y]:
if _y is not None and (hasattr(_y, '__len__') and len(_y)>0):
y.append(_y)
y = np.concatenate(y)
return x, y
def __fetch(self, source, data=None, key=None):
"""if data is string, then it must either be `trianing`, `validation` or
`test` or name of a valid dataset. Otherwise data is supposed to be raw
data which will be given to DataSet
"""
if isinstance(data, str):
if data in ['training', 'test', 'validation']:
if hasattr(self, 'dh_'):
data = getattr(self.dh_, f'{data}_data')(key=key)
else:
raise DataNotFound(source)
else:
# e.g. 'CAMELS_AUS'
dh = DataSet(data=data, **self.data_config)
setattr(self, 'dh_', dh)
data = getattr(dh, f'{source}_data')(key=key)
else:
dh = DataSet(data=data, **self.data_config)
setattr(self, 'dh_', dh)
data = getattr(dh, f'{source}_data')(key=key)
x, y = maybe_three_outputs(data, self.teacher_forcing)
return x, y
def _fetch_data(self, source:str, x=None, y=None, data=None):
"""The main idea is that the user should be able to fully customize
training/test data by overwriting training_data and test_data methods.
However, if x is given or data is a DataSet, then the training_data/test_data
methods of this (Model) class will not be called."""
user_defined_x = True
prefix = f'{source}_{dateandtime_now()}'
key = None
if x is None:
user_defined_x = False
key=f"5_{source}"
if isinstance(data, _DataSet):
# the user has provided DataSet from which training/test data
# needs to be extracted
setattr(self, 'dh_', data)
data = getattr(data, f'{source}_data')(key=key)
else:
data = getattr(self, f'{source}_data')(x=x, y=y, data=data, key=key)
# data may be tuple/list of three arrays
x, y = maybe_three_outputs(data, self.teacher_forcing)
return x, y, prefix, key, user_defined_x
def _maybe_reduce_nquantiles(self, num_exs:int)->None:
self.config['x_transformation'] = _reduce_nquantiles_in_config(self.config['x_transformation'], num_exs)
self.config['y_transformation'] = _reduce_nquantiles_in_config(self.config['y_transformation'], num_exs)
return
def _reduce_nquantiles_in_config(config:Union[str, list, dict], num_exs:int):
if isinstance(config, str) and config in ['quantile', 'quantile_normal']:
config = {'method': 'quantile', 'n_quantiles': num_exs}
elif isinstance(config, dict):
if 'method' not in config:
# for multiinput cases when x_transformation is defined as
# {'inp_1d': 'minmax', 'inp_2d': None}
pass # todo
elif config['method'] in ['quantile', 'quantile_normal']:
config['n_quantiles'] = min(config.get('n_quantiles', num_exs), num_exs)
elif isinstance(config, list):
for idx, transformer in enumerate(config):
if isinstance(transformer, str) and transformer in ['quantile', 'quantile_normal']:
config[idx] = {'method': 'quantile', 'n_quantiles': num_exs}
elif isinstance(transformer, dict) and transformer['method'] in ['quantile', 'quantile_normal']:
transformer['n_quantiles'] = min(transformer.get('n_quantiles', num_exs), num_exs)
config[idx] = transformer
return config
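# Illustrative sketch (standalone, hedged): why n_quantiles is capped at the number
# of examples; sklearn's QuantileTransformer, which such quantile transformations
# typically wrap, clips and warns when n_quantiles > n_samples.
import numpy as np
from sklearn.preprocessing import QuantileTransformer

x = np.random.default_rng(0).normal(size=(50, 1))
qt = QuantileTransformer(n_quantiles=min(1000, len(x))).fit(x)   # cap before fitting
print(qt.n_quantiles_)   # 50, i.e. capped at the number of examples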
def fill_val(metric_name, default="min", default_min=99999999):
if METRIC_TYPES.get(metric_name, default) == "min":
return default_min
return 0.0
def _find_num_examples(inputs)->Union[int, None]:
"""find number of examples in inputs"""
if isinstance(inputs, (pd.DataFrame, np.ndarray)):
return len(inputs)
if isinstance(inputs, list):
return len(inputs[0])
elif hasattr(inputs, '__len__'):
return len(inputs)
return None
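# Illustrative checks (hedged, comments only) of what _find_num_examples returns:
# >>> _find_num_examples(np.zeros((10, 3)))                      # ndarray -> 10
# >>> _find_num_examples([np.zeros((7, 2)), np.zeros((7, 1))])   # list -> length of first item, 7
# >>> _find_num_examples(42)                                      # no __len__ -> None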
import gc
import math
import importlib
from typing import Union
from SeqMetrics import RegressionMetrics, ClassificationMetrics
from .utils.utils import clear_weights, TrainTestSplit
from .utils.utils import dateandtime_now, jsonize, MATRIC_TYPES, update_model_config
DEFAULTS = {
'r2': 1.0,
'nse': 1.0,
'r2_score': 1.0
}
class ModelOptimizerMixIn(object):
def __init__(
self,
model,
algorithm,
num_iterations,
process_results,
prefix="hpo",
):
self.model = model
self.algorithm = algorithm
self.num_iterations = num_iterations
self.process_results = process_results
self.prefix = prefix
def fit(self, data=None, ):
if isinstance(data, tuple) or isinstance(data, list):
assert len(data) == 2
xy = True
else:
xy = False
PREFIX = f"{self.prefix}_{dateandtime_now()}"
self.iter = 0
print("{:<15} {:<20}".format("Iteration No.", "Validation Score"))
hpo = importlib.import_module("ai4water.hyperopt")
val_metric = self.model.val_metric
metric_type = MATRIC_TYPES.get(val_metric, 'min')
cross_validator = self.model.config['cross_validator']
cross_validate = True
if cross_validator is None:
cross_validate = False
if self.process_results:
verbosity = 0
else:
verbosity = -1
config = jsonize(self.model.config)
SEED = config['seed']
if self.model.mode == "classification":
Metrics = ClassificationMetrics
else:
Metrics = RegressionMetrics
def objective_fn(
**suggestions,
):
# we must not set the seed here to None, because doing so would make
# random splitting unreproducible (if random splitting is applied)
config['verbosity'] = verbosity
config['prefix'] = PREFIX
suggestions = jsonize(suggestions)
self.update(config, suggestions)
_model = self.model.from_config(
config.copy(),
make_new_path=True,
)
if cross_validate:
if xy:
val_score = _model.cross_val_score(*data)[0]
else:
val_score = _model.cross_val_score(data=data)[0]
else:
if xy: # todo, it is better to split data outside objective_fn
splitter = TrainTestSplit(seed=SEED,
test_fraction=config['val_fraction'])
if config['split_random']:
# for reproducibility, we should use SEED so that at every optimization
# iteration, we split the data in the same way
train_x, test_x, train_y, test_y = splitter.split_by_random(*data)
else:
train_x, test_x, train_y, test_y = splitter.split_by_slicing(*data)
_model.fit(x=train_x, y=train_y)
p = _model.predict(x=test_x)
else:
_model.fit(data=data)
test_y, p = _model.predict_on_validation_data(
data=data,
return_true=True,
process_results=False)
metrics = Metrics(test_y, p)
val_score = getattr(metrics, val_metric)()
orig_val_score = val_score
if metric_type != "min":
val_score = 1.0 - val_score
if not math.isfinite(val_score):
val_score = DEFAULTS.get(val_metric, 1.0)
print("{:<15} {:<20.5f} {:<20.5f}".format(self.iter, val_score, orig_val_score))
self.iter += 1
del _model
gc.collect()
return val_score
optimizer = hpo.HyperOpt(
self.algorithm,
objective_fn=objective_fn,
param_space=self.space,
num_iterations=self.num_iterations,
process_results=self.process_results,
opt_path=f"results\\{PREFIX}"
)
optimizer.fit()
clear_weights(optimizer.opt_path, optimizer.results)
return optimizer
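# Illustrative sketch (standalone, hedged; not part of the class above): how a
# maximization metric such as r2/nse is turned into a minimization objective inside
# objective_fn, mirroring the `1.0 - val_score` step.
def to_minimizable(score, metric_type="max"):
    # the hyperparameter optimizer minimizes, so "max" metrics are flipped around 1.0
    return score if metric_type == "min" else 1.0 - score

print(to_minimizable(0.92))          # 0.08 -> lower is better
print(to_minimizable(12.5, "min"))   # error-type metrics pass through unchanged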
class OptimizeHyperparameters(ModelOptimizerMixIn):
def __init__(
self,
model,
space,
algorithm,
num_iterations,
process_results=False,
**kwargs
):
super().__init__(
model=model,
algorithm=algorithm,
num_iterations=num_iterations,
process_results=process_results,
prefix="hpo"
)
self.space = space
config = jsonize(model.config)
model_config = config['model']
# algo type is name of algorithm, e.g. xgboost, randomforest or layers
self.algo_type = list(model_config.keys())[0]
self.original_model = model._original_model_config
def update(self, config, suggestions):
# first update the model config parameters
new_model_config = update_model_config(self.original_model['model'].copy(), suggestions)
config['model'] = {self.algo_type: new_model_config}
# now update hyperparameters which are not part of model config
new_other_config = update_model_config(self.original_model['other'].copy(), suggestions)
config.update(jsonize(new_other_config))
return config
class OptimizeTransformations(ModelOptimizerMixIn):
def __init__(
self,
model,
categories,
num_iterations=12,
algorithm="bayes",
include=None,
exclude=None,
append=None,
process_results=False,
):
super().__init__(
model=model,
num_iterations=num_iterations,
algorithm=algorithm,
process_results=process_results,
prefix="trans_hpo",
)
self.space = make_space(self.model.input_features,
include=include,
exclude=exclude,
append=append,
categories=categories)
self.input_features = model.input_features
assert isinstance(self.input_features, list)
self.output_features = model.output_features
if isinstance(self.output_features, str):
self.output_features = [self.output_features]
assert len(self.output_features) == 1
def update(self, config, suggestions):
"""updates `x_transformation` and `y_transformation` keys in config
based upon `suggestions`."""
transformations = []
y_transformations = []
for feature, method in suggestions.items():
if method == "none":
pass
else:
t = {"method": method, "features": [feature]}
if method.startswith("log"):
t["treat_negatives"] = True
t["replace_zeros"] = True
elif method in ["box-cox", "yeo-johnson", "power"]:
t["treat_negatives"] = True
t["replace_zeros"] = True
elif method == "sqrt":
t['treat_negatives'] = True
t["replace_zeros"] = True
if feature in self.input_features:
transformations.append(t)
else:
y_transformations.append(t)
# following parameters must be overwritten even if they were provided by the user.
config['x_transformation'] = transformations
config['y_transformation'] = y_transformations or None
return
def make_space(
input_features: list,
categories: list,
include: Union[str, list, dict] = None,
exclude: Union[str, list] = None,
append: dict = None,
) -> list:
"""
Arguments:
input_features :
categories :
include: the names of input features to include
exclude: the name/names of input features to exclude
append: the input features with custom candidate transformations
"""
hpo = importlib.import_module("ai4water.hyperopt")
Categorical = hpo.Categorical
# although we need space as list at the end, it is better to create a dictionary of it
# because manipulating dictionary is easier
space = {
name: Categorical(categories, name=name) for name in input_features
}
if include is not None:
if isinstance(include, str):
include = {include: categories}
elif isinstance(include, list):
_include = {}
for feat in include:
assert feat in input_features, f"{feat} is not in input_features but is used in include"
if isinstance(feat, str):
_include[feat] = categories
else:
assert isinstance(feat, dict) and len(feat) == 1
_include[list(feat.keys())[0]] = list(feat.values())[0]
include = _include
else:
assert isinstance(include, dict) and len(include) == 1
# since include is given, we will ignore default case when all features are considered
space = {}
for k, v in include.items():
if not isinstance(v, Categorical):
assert isinstance(v, list), f"space for {k} must be list but it is {v.__class__.__name__}"
v = Categorical(v, name=k)
space[k] = v
if exclude is not None:
if isinstance(exclude, str):
exclude = [exclude]
assert isinstance(exclude, list)
for feat in exclude:
space.pop(feat)
if append is not None:
assert isinstance(append, dict)
for k, v in append.items():
if not isinstance(v, Categorical):
assert isinstance(v, list), f"space for {k} must be list but it is {v.__class__.__name__}"
v = Categorical(v, name=k)
space[k] = v
return list(space.values())
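# Illustrative usage sketch (hedged, comments only): the kind of search space
# make_space returns. Feature names and candidate transformations are hypothetical,
# and this assumes ai4water.hyperopt.Categorical exposes a .name attribute as in
# scikit-optimize.
# >>> space = make_space(["tide_cm", "sal_psu"],
# ...                    categories=["minmax", "zscore", "log", "none"],
# ...                    include={"tide_cm": ["log", "none"]})
# >>> [s.name for s in space]
# ['tide_cm']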
__all__ = ["DualAttentionModel", "InputAttentionModel"]
from easy_mpl import imshow
from .backend import tf, plt, np, os
from .backend import keras
from .functional import Model as FModel
from ai4water.utils.utils import print_something
from .utils.utils import DataNotFound
from ai4water.nn_tools import check_act_fn
from ai4water.preprocessing import DataSet
from ai4water.models._tensorflow.layer_definition import MyTranspose, MyDot
from ai4water.utils.utils import plot_activations_along_inputs
layers = keras.layers
KModel = keras.models.Model
class DALSTM(keras.layers.Layer):
def __init__(
self,
enc_config: dict = None,
dec_config: dict = None,
drop_remainder: bool = True,
teacher_forcing: bool = False,
**kwargs
):
self.enc_config = enc_config
self.dec_config = dec_config
self.drop_remainder = drop_remainder
self.teacher_forcing = teacher_forcing
super().__init__(**kwargs)
raise NotImplementedError
class DualAttentionModel(FModel):
"""
This is the dual-attention LSTM model of Qin_ et al., 2017. The code is adapted
from this_ repository.
Example:
>>> from ai4water import DualAttentionModel
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> model = DualAttentionModel(lookback=5,
... input_features=data.columns.tolist()[0:-1],
... output_features=data.columns.tolist()[-1:])
... #If you do not wish to feed previous output as input to the model, you
... #can set teacher forcing to False. The drop_remainder argument must be
... #set to True in such a case.
>>> model = DualAttentionModel(teacher_forcing=False, batch_size=4,
... drop_remainder=True, ts_args={'lookback':5})
>>> model.fit(data=data)
.. _Qin:
https://arxiv.org/abs/1704.02971
.. _this:
https://github.com/chensvm/A-Dual-Stage-Attention-Based-Recurrent-Neural-Network-for-Time-Series-Prediction
"""
_enc_config = {'n_h': 20,  # length of the hidden state of the encoder LSTM
'n_s': 20,  # length of the cell state of the encoder LSTM
'm': 20,  # length of the encoded hidden representation used by the attention/decoder
'enc_lstm1_act': None,
'enc_lstm2_act': None,
}
# arguments for decoder/outputAttention in Dual stage attention
_dec_config = {
'p': 30,
'n_hde0': 30,
'n_sde0': 30
}
def __init__(
self,
enc_config: dict = None,
dec_config: dict = None,
teacher_forcing: bool = True,
**kwargs
):
"""
Arguments:
enc_config:
dictionary defining configuration of encoder/input attention. It must
have the following keys
- n_h: 20
- n_s: 20
- m: 20
- enc_lstm1_act: None
- enc_lstm2_act: None
dec_config:
dictionary defining configuration of decoder/output attention. It must have
the following three keys
- p: 30
- n_hde0: None
- n_sde0: None
teacher_forcing:
Whether to use the previous target/observation as input or not. If
yes, then the model will require 2 inputs. The first input will be
of shape (num_examples, lookback, num_inputs) while the second input
will be of shape (num_examples, lookback-1, 1). This second input is
supposed to be the target variable observed at previous time step.
kwargs :
The keyword arguments for the [ai4water's Model][ai4water.Model] class
"""
self.method = 'dual_attention'
if enc_config is None:
enc_config = DualAttentionModel._enc_config
else:
assert isinstance(enc_config, dict)
if dec_config is None:
dec_config = DualAttentionModel._dec_config
else:
assert isinstance(dec_config, dict)
self.enc_config = enc_config
self.dec_config = dec_config
super(DualAttentionModel, self).__init__(teacher_forcing=teacher_forcing, **kwargs)
setattr(self, 'category', "DL")
def build(self, input_shape=None):
self.config['dec_config'] = self.dec_config
self.config['enc_config'] = self.enc_config
setattr(self, 'batch_size', self.config['batch_size'])
setattr(self, 'drop_remainder', self.config['drop_remainder'])
self.de_LSTM_cell = layers.LSTM(self.dec_config['p'], return_state=True, name='decoder_LSTM')
self.de_densor_We = layers.Dense(self.enc_config['m'])
if self.config['drop_remainder']:
h_de0 = tf.zeros((self.batch_size, self.dec_config['n_hde0']), name='dec_1st_hidden_state')
s_de0 = tf.zeros((self.batch_size, self.dec_config['n_sde0']), name='dec_1st_cell_state')
else:
h_de0 = layers.Input(shape=(self.dec_config['n_hde0'],), name='dec_1st_hidden_state')
s_de0 = layers.Input(shape=(self.dec_config['n_sde0'],), name='dec_1st_cell_state')
input_y = None
if self.teacher_forcing and self.drop_remainder:
input_y = layers.Input(batch_shape=(self.batch_size, self.lookback - 1, self.num_outs), name='input_y')
elif not self.drop_remainder:
input_y = layers.Input(shape=(self.lookback - 1, self.num_outs), name='input_y')
if self.drop_remainder:
enc_input = keras.layers.Input(batch_shape=(self.batch_size, self.lookback, self.num_ins), name='enc_input')
else:
enc_input = keras.layers.Input(shape=(self.lookback, self.num_ins), name='enc_input')
enc_lstm_out, s0, h0 = self._encoder(enc_input, self.config['enc_config'])
# originally the last dimension was -1 but here it is set equal to 'm'
# eq 11 in paper
enc_out = layers.Reshape((self.lookback, self.enc_config['m']), name='enc_out_eq_11')(enc_lstm_out)
h, context = self.decoder_attention(enc_out, input_y, s_de0, h_de0)
h = layers.Reshape((self.num_outs, self.dec_config['p']))(h)
# concatenation of decoder hidden state and the context vector.
last_concat = layers.Concatenate(axis=2, name='last_concat')([h, context]) # (None, 1, 50)
# originally it was not defined, but in tf-keras we need to define it
sec_dim = self.enc_config['m'] + self.dec_config['p']
last_reshape = layers.Reshape((sec_dim,), name='last_reshape')(last_concat) # (None, 50)
result = layers.Dense(self.dec_config['p'], name='eq_22')(last_reshape) # (None, 30) # equation 22
output = layers.Dense(self.num_outs)(result)
if self.forecast_len>1:
output = layers.Reshape(target_shape=(self.num_outs, self.forecast_len))(output)
initial_input = [enc_input]
if input_y is not None:
initial_input.append(input_y)
if self.config['drop_remainder']:
self._model = self.compile(model_inputs=initial_input, outputs=output)
else:
self._model = self.compile(model_inputs=initial_input + [s0, h0, s_de0, h_de0], outputs=output)
return
def _encoder(self, enc_inputs, config, lstm2_seq=True, suf: str = '1', s0=None, h0=None, num_ins=None):
if num_ins is None:
num_ins = self.num_ins
self.en_densor_We = layers.Dense(self.lookback, name='enc_We_'+suf)
_config, act_str = check_act_fn({'activation': config['enc_lstm1_act']})
self.en_LSTM_cell = layers.LSTM(config['n_h'], return_state=True, activation=_config['activation'],
name='encoder_LSTM_'+suf)
config['enc_lstm1_act'] = act_str
# initialize the first cell state
if s0 is None:
if self.drop_remainder:
s0 = tf.zeros((self.batch_size, config['n_s']), name=f'enc_first_cell_state_{suf}')
else:
s0 = layers.Input(shape=(config['n_s'],), name='enc_first_cell_state_' + suf)
# initialize the first hidden state
if h0 is None:
if self.drop_remainder:
h0 = tf.zeros((self.batch_size, config['n_h']), name=f'enc_first_hidden_state_{suf}')
else:
h0 = layers.Input(shape=(config['n_h'],), name='enc_first_hidden_state_' + suf)
enc_attn_out = self.encoder_attention(enc_inputs, s0, h0, num_ins, suf)
enc_lstm_in = layers.Reshape((self.lookback, num_ins), name='enc_lstm_input_'+suf)(enc_attn_out)
_config, act_str = check_act_fn({'activation': config['enc_lstm2_act']})
enc_lstm_out = layers.LSTM(config['m'], return_sequences=lstm2_seq, activation=_config['activation'],
name='LSTM_after_encoder_'+suf)(enc_lstm_in) # h_en_all
config['enc_lstm2_act'] = act_str
return enc_lstm_out, h0, s0
def one_encoder_attention_step(self, h_prev, s_prev, x, t, suf: str = '1'):
"""
:param h_prev: previous hidden state
:param s_prev: previous cell state
:param x: (T, n); n is the number of input series, T is the length of the time series
:param t: time-step
:param suf: str, Suffix to be attached to names
:return: x_t's attention weights; n numbers in total, which sum to 1
"""
_concat = layers.Concatenate()([h_prev, s_prev]) # (none,1,2m)
result1 = self.en_densor_We(_concat) # (none,1,T)
result1 = layers.RepeatVector(x.shape[2],)(result1) # (none,n,T)
x_temp = MyTranspose(axis=(0, 2, 1))(x) # X_temp(None,n,T)
# (none,n,T) Ue(T,T), Ue * Xk in eq 8 of paper
result2 = MyDot(self.lookback, name='eq_8_mul_'+str(t)+'_'+suf)(x_temp)
result3 = layers.Add()([result1, result2]) # (none,n,T)
result4 = layers.Activation(activation='tanh')(result3) # (none,n,T)
result5 = MyDot(1)(result4)
result5 = MyTranspose(axis=(0, 2, 1), name='eq_8_' + str(t)+'_'+suf)(result5) # etk/ equation 8
alphas = layers.Activation(activation='softmax', name='eq_9_'+str(t)+'_'+suf)(result5) # equation 9
return alphas
def encoder_attention(self, _input, _s0, _h0, num_ins, suf: str = '1'):
s = _s0
_h = _h0
# initialize empty list of outputs
attention_weight_t = None
for t in range(self.lookback):
_context = self.one_encoder_attention_step(_h, s, _input, t, suf=suf) # (none,1,n)
x = layers.Lambda(lambda x: _input[:, t, :])(_input)
x = layers.Reshape((1, num_ins))(x)
_h, _, s = self.en_LSTM_cell(x, initial_state=[_h, s])
if t != 0:
# attention_weight_t = layers.Merge(mode='concat', concat_axis=1,
# name='attn_weight_'+str(t))([attention_weight_t,
# _context])
attention_weight_t = layers.Concatenate(
axis=1,
name='attn_weight_'+str(t)+'_'+suf)([attention_weight_t, _context])
else:
attention_weight_t = _context
# get the driving input series
enc_output = layers.Multiply(name='enc_output_'+suf)([attention_weight_t, _input]) # equation 10 in paper
return enc_output
def one_decoder_attention_step(self, _h_de_prev, _s_de_prev, _h_en_all, t):
"""
:param _h_de_prev: previous hidden state
:param _s_de_prev: previous cell state
:param _h_en_all: (None, T, m); encoder hidden states for all T lookback steps, each of length m
:param t: int, timestep
:return: the context vector computed via attention over the encoder hidden states (eq 14)
"""
# concatenation of the previous hidden state and cell state of the LSTM unit in eq 12
_concat = layers.Concatenate(name='eq_12_'+str(t))([_h_de_prev, _s_de_prev]) # (None,1,2p)
result1 = self.de_densor_We(_concat) # (None,1,m)
result1 = layers.RepeatVector(self.lookback)(result1) # (None,T,m)
result2 = MyDot(self.enc_config['m'])(_h_en_all)
result3 = layers.Add()([result1, result2]) # (None,T,m)
result4 = layers.Activation(activation='tanh')(result3) # (None,T,m)
result5 = MyDot(1)(result4)
beta = layers.Activation(activation='softmax', name='eq_13_'+str(t))(result5) # equation 13
_context = layers.Dot(axes=1, name='eq_14_'+str(t))([beta, _h_en_all]) # (1,m) # equation 14 in paper
return _context
def decoder_attention(self, _h_en_all, _y, _s0, _h0):
s = _s0
_h = _h0
for t in range(self.lookback-1):
_context = self.one_decoder_attention_step(_h, s, _h_en_all, t) # (batch_size, 1, 20)
# if we want to use the true value of target of previous timestep as input then we will use _y
if self.teacher_forcing:
y_prev = layers.Lambda(lambda y_prev: _y[:, t, :])(_y) # (batch_size, lookback, 1) -> (batch_size, 1)
y_prev = layers.Reshape((1, self.num_outs))(y_prev) # -> (batch_size, 1, 1)
# concatenation of decoder input and computed context vector # ??
y_prev = layers.Concatenate(axis=2)([y_prev, _context]) # (None,1,21)
else:
y_prev = _context
y_prev = layers.Dense(self.num_outs, name='eq_15_'+str(t))(y_prev) # (None,1,1), Eq 15 in paper
_h, _, s = self.de_LSTM_cell(y_prev, initial_state=[_h, s]) # eq 16 ??
_context = self.one_decoder_attention_step(_h, s, _h_en_all, 'final')
return _h, _context
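# Illustrative sketch (standalone, hedged; not part of the class above): the
# per-timestep soft attention weighting used in the encoder (eqs 8-10), written in
# plain numpy; shapes and values are hypothetical.
import numpy as np

def softmax(z, axis=-1):
    e = np.exp(z - z.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)

scores = np.random.default_rng(0).normal(size=(1, 1, 4))   # e_t,k for 4 input series (eq 8)
alphas = softmax(scores)                                    # attention weights, sum to 1 (eq 9)
x_t = np.random.default_rng(1).normal(size=(1, 1, 4))       # driving series at step t
weighted = alphas * x_t                                     # re-weighted inputs (eq 10)
print(float(alphas.sum()))                                  # 1.0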
def fetch_data(self, x, y, source, data=None, **kwargs):
if self.teacher_forcing:
x, prev_y, labels = getattr(self.dh_, f'{source}_data')(**kwargs)
else:
x, labels = getattr(self.dh_, f'{source}_data')(**kwargs)
prev_y = None
n_s_feature_dim = self.enc_config['n_s']
n_h_feature_dim = self.enc_config['n_h']
p_feature_dim = self.dec_config['p']
if kwargs.get('use_datetime_index', False): # during deindexification, first feature will be removed.
n_s_feature_dim += 1
n_h_feature_dim += 1
p_feature_dim += 1
idx = np.expand_dims(x[:, 1:, 0], axis=-1) # extract the index from x
if self.use_true_prev_y:
prev_y = np.concatenate([prev_y, idx], axis=2) # insert index in prev_y
other_inputs = []
if not self.drop_remainder:
s0 = np.zeros((x.shape[0], n_s_feature_dim))
h0 = np.zeros((x.shape[0], n_h_feature_dim))
h_de0 = s_de0 = np.zeros((x.shape[0], p_feature_dim))
other_inputs = [s0, h0, s_de0, h_de0]
if self.teacher_forcing:
return [x, prev_y] + other_inputs, labels
else:
return [x] + other_inputs, labels
def training_data(self, x=None, y=None, data='training', key=None):
self._maybe_dh_not_set(data=data)
return self.fetch_data(x=x, y=y, source='training', data=data, key=key)
def validation_data(self, x=None, y=None, data='validation', **kwargs):
self._maybe_dh_not_set(data=data)
return self.fetch_data(x=x, y=y, source='validation', data=data, **kwargs)
def test_data(self, x=None, y=None, data='test', **kwargs):
self._maybe_dh_not_set(data=data)
return self.fetch_data(x=x, y=y, source='test', data=data, **kwargs)
def _maybe_dh_not_set(self, data):
"""if dh_ has not been set yet, try to create it using data argument if
possible"""
if isinstance(data, str) and data not in ['training', 'test', 'validation']:
self.dh_ = DataSet(data=data, **self.data_config)
elif not isinstance(data, str):
self.dh_ = DataSet(data=data, **self.data_config)
return
def interpret(
self,
data=None,
data_type='training',
**kwargs):
return self.plot_act_along_inputs(
data=data,
layer_name=f'attn_weight_{self.lookback - 1}_1',
data_type=data_type,
**kwargs)
def get_attention_weights(
self,
layer_name: str=None,
x = None,
data = None,
data_type = 'training',
)->np.ndarray:
"""
Parameters
----------
layer_name : str, optional
the name of attention layer. If not given, the final attention
layer will be used.
x : optional
input data, if given, then ``data`` must not be given
data :
data_type : str, optional
the data on which the forward pass is made to get attention weights. Possible
values are
- ``training``
- ``validation``
- ``test``
- ``all``
Returns
-------
a numpy array of shape (num_examples, lookback, num_ins)
"""
if x is not None:
# default value
assert data_type in ("training", "test", "validation", "all")
layer_name = layer_name or f'attn_weight_{self.lookback - 1}_1'
assert isinstance(layer_name, str), f"""
layer_name must be a string, not of {layer_name.__class__.__name__} type
"""
from ai4water.postprocessing.visualize import Visualize
kwargs = {}
if self.config['drop_remainder']:
kwargs['batch_size'] = self.config['batch_size']
activation = Visualize(model=self).get_activations(
layer_names=layer_name,
x=x,
data=data,
data_type=data_type,
**kwargs)
activation = activation[layer_name] # (num_examples, lookback, num_ins)
return activation
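# Illustrative usage sketch (hedged, comments only); mirrors the class docstring and
# assumes a short training run on the busan_beach dataset:
# >>> from ai4water.datasets import busan_beach
# >>> data = busan_beach()
# >>> model = DualAttentionModel(teacher_forcing=False, batch_size=4,
# ...                            drop_remainder=True, ts_args={'lookback': 5})
# >>> model.fit(data=data)
# >>> w = model.get_attention_weights(data=data, data_type='training')
# >>> w.shape    # (num_examples, lookback, num_ins)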
def plot_act_along_inputs(
self,
data,
layer_name: str,
data_type='training',
vmin=None,
vmax=None,
show=False
):
if not os.path.exists(self.act_path):
os.makedirs(self.act_path)
activation = self.get_attention_weights(
layer_name=layer_name,
data=data,
data_type=data_type,
)
act_avg_over_examples = np.mean(activation, axis=0) # (lookback, num_ins)
lookback = self.config['ts_args']['lookback']
x, observations = getattr(self, f'{data_type}_data')(data=data)
if len(x) == 0 or (isinstance(x, list) and len(x[0]) == 0):
raise ValueError(f"no {data_type} data found.")
predictions = self.predict(x=x, process_results=False)
plt.close('all')
fig, axis = plt.subplots()
ytick_labels = [f"t-{int(i)}" for i in np.linspace(lookback - 1, 0, lookback)]
im = imshow(act_avg_over_examples,
ax=axis,
aspect="auto",
yticklabels=ytick_labels,
ax_kws=dict(ylabel='lookback steps'),
show=False
)
axis.set_xticks(np.arange(self.num_ins))
axis.set_xticklabels(self.input_features, rotation=90)
fig.colorbar(im, orientation='horizontal', pad=0.3)
plt.savefig(
os.path.join(self.act_path, f'acts_avg_over_examples_{data_type}'),
dpi=400, bbox_inches='tight')
plt.close('all')
x = self.inputs_for_attention(x)
return plot_activations_along_inputs(
data=x,
activations=activation,
observations=observations,
predictions=predictions,
in_cols=self.input_features,
out_cols=self.output_features,
lookback=lookback,
name=data_type,
path=self.act_path,
vmin=vmin,
vmax=vmax,
show=show
)
def plot_act_along_lookback(self, activations, sample=0):
assert isinstance(activations, np.ndarray)
activation = activations[sample, :, :]
act_t = activation.transpose()
fig, axis = plt.subplots()
for idx, _name in enumerate(self.input_features):
axis.plot(act_t[idx, :], label=_name)
axis.set_xlabel('Lookback')
axis.set_ylabel('Input attention weight')
axis.legend(loc="best")
plt.show()
return
def inputs_for_attention(self, inputs):
""" returns the inputs for attention mechanism """
if isinstance(inputs, list):
inputs = inputs[0]
inputs = inputs[:, -1, :] # why 0, why not -1
assert inputs.shape[1] == self.num_ins
return inputs
def _fit_transform_x(self, x):
"""transforms x and puts the transformer in config witht he key name"""
feature_names = [
self.input_features,
[f"{i}" for i in range(self.enc_config['n_s'])],
[f"{i}" for i in range(self.enc_config['n_h'])],
[f"{i}" for i in range(self.dec_config['n_hde0'])],
[f"{i}" for i in range(self.dec_config['n_sde0'])],
]
transformation = [self.config['x_transformation'], None, None, None, None]
if self.teacher_forcing:
feature_names.insert(1, self.output_features)
transformation.insert(1, self.config['y_transformation'])
return self._fit_transform(x, 'x_transformer_', transformation, feature_names)
def _fetch_data(self, source:str, x=None, y=None, data=None):
"""The main idea is that the user should be able to fully customize
training/test data by overwriting training_data and test_data methods.
However, if x is given or data is a DataSet, then the training_data/test_data
methods of this(Model) class will not be called."""
x, y, prefix, key, user_defined_x = super()._fetch_data(source, x, y, data)
if isinstance(x, np.ndarray):
if not self.config['drop_remainder']:
n_s_feature_dim = self.config['enc_config']['n_s']
n_h_feature_dim = self.config['enc_config']['n_h']
s0 = np.zeros((x.shape[0], n_s_feature_dim))
h0 = np.zeros((x.shape[0], n_h_feature_dim))
if self.__class__.__name__ == "DualAttentionModel":
p_feature_dim = self.dec_config['p']
h_de0 = s_de0 = np.zeros((x.shape[0], p_feature_dim))
x = [x, s0, h0, h_de0, s_de0]
else:
x = [x, s0, h0]
return x, y, prefix, key, user_defined_x
class InputAttentionModel(DualAttentionModel):
"""
InputAttentionModel is same as DualAttentionModel with output attention/decoder part
removed.
Example:
>>> from ai4water import InputAttentionModel
>>> from ai4water.datasets import busan_beach
>>> model = InputAttentionModel(
... input_features=busan_beach().columns.tolist()[0:-1],
... output_features=busan_beach().columns.tolist()[-1:])
>>> model.fit(data=busan_beach())
"""
def __init__(self, *args, teacher_forcing=False, **kwargs):
super(InputAttentionModel, self).__init__(*args, teacher_forcing=teacher_forcing, **kwargs)
def build(self, input_shape=None):
self.config['enc_config'] = self.enc_config
setattr(self, 'batch_size', self.config['batch_size'])
setattr(self, 'drop_remainder', self.config['drop_remainder'])
setattr(self, 'method', 'input_attention')
print('building input attention model')
enc_input = keras.layers.Input(shape=(self.lookback, self.num_ins), name='enc_input1')
lstm_out, h0, s0 = self._encoder(enc_input, self.enc_config, lstm2_seq=False)
act_out = layers.LeakyReLU()(lstm_out)
predictions = layers.Dense(self.num_outs)(act_out)
if self.forecast_len>1:
predictions = layers.Reshape(target_shape=(self.num_outs, self.forecast_len))(predictions)
if self.verbosity > 2:
print('predictions: ', predictions)
inputs = [enc_input]
if not self.drop_remainder:
inputs = inputs + [s0, h0]
self._model = self.compile(model_inputs=inputs, outputs=predictions)
return
def fetch_data(self, source, x=None, y=None, data=None, **kwargs):
if x is None:
if isinstance(data, str):
if data in ("training", "test", "validation"):
if hasattr(self, 'dh_'):
data = getattr(self.dh_, f'{data}_data')(**kwargs)
else:
raise DataNotFound(source)
else:
raise ValueError
else:
dh = DataSet(data=data, **self.data_config)
setattr(self, 'dh_', dh)
data = getattr(dh, f'{source}_data')(**kwargs)
else:
data = x, y
if self.teacher_forcing:
x, prev_y, labels = data
else:
x, labels = data
n_s_feature_dim = self.config['enc_config']['n_s']
n_h_feature_dim = self.config['enc_config']['n_h']
if kwargs.get('use_datetime_index', False): # during deindexification, first feature will be removed.
n_s_feature_dim += 1
n_h_feature_dim += 1
idx = np.expand_dims(x[:, 1:, 0], axis=-1) # extract the index from x
if self.teacher_forcing:
prev_y = np.concatenate([prev_y, idx], axis=2) # insert index in prev_y
if not self.config['drop_remainder']:
s0 = np.zeros((x.shape[0], n_s_feature_dim))
h0 = np.zeros((x.shape[0], n_h_feature_dim))
x = [x, s0, h0]
if self.verbosity > 0:
print_something(x, "input_x")
print_something(labels, "target")
if self.teacher_forcing:
return [x, prev_y], labels
else:
return x, labels
def _fit_transform_x(self, x):
"""transforms x and puts the transformer in config witht he key name
for conformity we need to add feature names of initial states and their transformations
will always be None.
"""
# x can be array when the user does not provide input conditions!
if isinstance(x, list):
assert len(x) == 3
feature_names = [
self.input_features,
[f"{i}" for i in range(self.enc_config['n_s'])],
[f"{i}" for i in range(self.enc_config['n_h'])]
]
transformation = [self.config['x_transformation'], None, None]
return self._fit_transform(x, 'x_transformer_', transformation, feature_names)
else:
transformation = self.config['x_transformation']
return self._fit_transform(x, 'x_transformer_', transformation,
self.input_features)
from typing import Any, List
from collections import OrderedDict
from ._main import BaseModel
from ai4water.tf_attributes import ACTIVATION_LAYERS, tcn, MULTI_INPUT_LAYERS
from .nn_tools import get_call_args
from .backend import tf, torch, np, os
import ai4water.backend as K
from .models._torch import LAYERS as TORCH_LAYERS
from .tf_attributes import LAYERS
if K.BACKEND == 'tensorflow' and tf is not None:
MODEL = tf.keras.Model
elif K.BACKEND == 'pytorch' and torch is not None:
MODEL = torch.nn.Module
else:
class MODEL(object):
pass
class Model(MODEL, BaseModel):
"""
This class Inherits from `BaseModel`.
This class is a subclass of keras.Model/torch.nn.Module depending upon the
backend used. For scikit-learn/xgboost/catboost type models, this class only
inherits from `BaseModel. For deep learning/neural network based models, this
class directly exposes all the functionalities of underlying Model. Thus `self`
is now a keras Model or torch.nn.Module. If the user wishes to create his/her
own NN architecture, he/she should overwrite `initialize_layers` and `call`/`forward`
methods.
"""
def __init__(self,
verbosity=1,
model=None,
path=None,
prefix=None,
**kwargs):
"""
Initializes the layers of NN model using `initialize_layers` method.
All other input arguments go to `BaseModel`.
"""
if K.BACKEND == 'tensorflow' and tf is not None:
min_version = tf.__version__.split(".")[1]
maj_version = tf.__version__.split(".")[0]
if maj_version in ["2"] and min_version in ["3", "4"]:
raise NotImplementedError(f"""
Not implemented due to a bug in tensorflow as shown here https://github.com/tensorflow/tensorflow/issues/44646
You can use functional API instead by using
from ai4water.functional import Model
instead of
from ai4water import Model
Or change the tensorflow version. Current version is {tf.__version__}.
""")
tf_kwargs = {}
for arg in ['inputs', 'outputs']:
if arg in kwargs:
tf_kwargs[arg] = kwargs[arg]
self._go_up = False
MODEL.__init__(self, **tf_kwargs)
self._go_up = True
BaseModel.__init__(self,
prefix=prefix,
path=path,
verbosity=verbosity,
model=model,
**kwargs)
self.config['backend'] = K.BACKEND
if torch is not None:
from .models._torch import Learner
self.torch_learner = Learner(
model=self,
batch_size=self.config['batch_size'],
num_epochs=self.config['epochs'],
shuffle=self.config['shuffle'],
to_monitor=self.config['monitor'],
patience=self.config['patience'],
path=self.path,
use_cuda=False,
wandb_config=self.config['wandb_config'],
verbosity=self.verbosity
)
if self.category == "DL":
self.initialize_layers(self.config['model']['layers'])
if K.BACKEND == 'tensorflow':
outs = self.call(self._input_lyrs(), run_call=False)
setattr(self, 'output_lyrs', outs)
self._go_up = False # do not reinitiate BaseModel and other upper classes
maj_ver = int(tf.__version__.split('.')[0])
min_ver = int(tf.__version__.split('.')[1][0])
# in tf versions >= 2.5, we don't need to specify inputs and outputs as keyword arguments
if maj_ver>1 and min_ver>=5:
MODEL.__init__(self, self._input_lyrs(), self.output_lyrs)
else:
MODEL.__init__(self, inputs=self._input_lyrs(), outputs=self.output_lyrs)
self.build(self._get_dummy_input_shape()) # will initialize ML models or build NNs
def _input_lyrs(self):
"""
Input layers of deep learning model.
`input_lyrs` can be a ListWrapper so just extract the tensor from the
list. if the length of the list ==1
"""
input_lyrs = None
if hasattr(self, 'input_lyrs'):
_input_lyrs = self.input_lyrs
if isinstance(_input_lyrs, list) and len(_input_lyrs) == 1:
input_lyrs = _input_lyrs[0]
elif _input_lyrs.__class__.__name__ == "ListWrapper" and len(_input_lyrs) == 1:
input_lyrs = _input_lyrs[0]
else:
input_lyrs = _input_lyrs
return input_lyrs
@property
def torch_learner(self):
return self._torch_learner
@torch_learner.setter
def torch_learner(self, x):
"""So that learner can be changed."""
self._torch_learner = x
@property
def layer_names(self) -> List[str]:
"""Returns a list of names of layers/nn.modules
for deep learning model. For ML models, returns empty list"""
_all_layers = []
if self.category == "ML":
pass
elif self.config['backend'] == 'tensorflow':
for layer in self.layers:
_all_layers.append(layer.name)
elif self.config['backend'] == 'pytorch':
_all_layers = list(self._modules.keys())
return _all_layers
@property
def layers_in_shapes(self) -> dict:
"""Returns the shapes of inputs to all layers"""
shapes = {}
for lyr in self.layers:
shapes[lyr.name] = lyr.input_shape
return shapes
@property
def layers_out_shapes(self) -> dict:
""" returns shapes of outputs from all layers in model as dictionary"""
shapes = {}
for lyr in self.layers:
shapes[lyr.name] = lyr.output_shape
return shapes
@property
def num_input_layers(self) -> int:
if self.category != "DL":
return np.inf
if K.BACKEND == 'pytorch':
return 1
else:
return len(self.inputs)
@property
def input_layer_names(self) -> list:
default = []
if self.inputs:
default = [lyr.name.split(':')[0] for lyr in self.inputs]
if len(default) == 0:
sec_option_inputs = self._input_lyrs()
if isinstance(sec_option_inputs, list):
default = []
for i in sec_option_inputs:
default.append(i.name)
else:
default = sec_option_inputs.name
return default
def _get_dummy_input_shape(self):
shape = ()
if K.BACKEND == 'tensorflow' and self.category == "DL":
if isinstance(self.inputs, list):
if len(self.inputs)==1:
shape = self.inputs[0].shape
else:
shape = [inp.shape for inp in self.inputs]
return shape
@property
def api(self):
return 'subclassing'
@property
def fit_fn(self):
if self.category == "DL":
if K.BACKEND == 'tensorflow':
return super().fit
elif K.BACKEND == 'pytorch':
return self.torch_learner.fit
return self._model.fit # e.g. for ML models
@property
def evaluate_fn(self):
if self.category == "DL":
if K.BACKEND == 'tensorflow':
return super().evaluate
elif K.BACKEND == 'pytorch':
return self.torch_learner.evaluate
else:
raise ValueError
elif self.category == "ML":
return self.evalute_ml_models
return self._model.evaluate
@property
def predict_fn(self):
if self.category == "DL":
if K.BACKEND == 'tensorflow':
return super().predict
elif K.BACKEND == 'pytorch':
return self.torch_learner.predict
return self._model.predict
def initialize_layers(self, layers_config: dict, inputs=None):
"""
Initializes the layers/weights/variables which are to be used in `forward`
or `call` method.
Parameters
---------
layers_config : python dictionary to define neural network. For details
[see](https://ai4water.readthedocs.io/en/latest/build_dl_models.html)
inputs : if None, it is assumed that the `Input` layer either
exists in `layers_config` or an Input layer will be created
within this method before adding any other layer. If not None,
then it must be an `Input` layer and the remaining NN architecture
will be built as defined in `layers_config`. This can be handy
when we want to use this method several times to build a complex
or parallel NN structure. Avoid `Input` in layer names.
"""
layers_config = layers_config.copy()
input_lyrs = []
initiated_layers = OrderedDict()
wrp_layer = None # indicator for wrapper layers
first_layer = True
for lyr, lyr_args in layers_config.items():
lyr_config, lyr_inputs, named_outs, call_args = self.deconstruct_lyr_args(lyr, lyr_args)
lyr_name, args, lyr_config, activation = self.check_lyr_config(lyr, lyr_config)
if K.BACKEND == 'pytorch':
if first_layer:
first_layer = False
if callable(lyr_config):
lyr_initiated = lyr_config
else:
lyr_initiated = TORCH_LAYERS[lyr_name](**lyr_config)
setattr(self, lyr, lyr_initiated)
initiated_layers[lyr] = {"layer": lyr_initiated, "named_outs": named_outs, 'call_args': call_args,
'inputs': lyr_inputs}
else:
# maybe the user has defined layers without an input layer; in this case add an Input layer as the first layer
if first_layer:
if inputs is not None: # This method was called by providing it inputs.
assert isinstance(inputs, tf.Tensor)
# since inputs have been defined, all the layers that will be added will be next to first layer
first_layer = False
layer_outputs = inputs
initiated_layers[layer_outputs.name] = {'layer': layer_outputs, 'tf_name': lyr_name}
elif lyr_name != "Input":
if 'input_shape' in lyr_config: # input_shape is given in the first layer so make input layer
initialized_layer = LAYERS["Input"](shape=lyr_config['input_shape'])
else:
# for simple dense layer based models, lookback will not be used
def_shape = (self.num_ins,) if self.lookback == 1 else (self.lookback, self.num_ins)
initialized_layer = LAYERS["Input"](shape=def_shape)
# first layer is built so next iterations will not be for first layer
first_layer = False
# put the first layer in memory to be used for model compilation
# add the layer which the user had specified as the first layer
initiated_layers[initialized_layer.name] = {'layer': initialized_layer,
'tf_name': lyr_name}
input_lyrs.append(initialized_layer)
# The inputs to the layer have not been specified, so either it is an Input layer
if lyr_inputs is None:
# or it uses the previous outputs as inputs
if lyr_name == "Input":
# it is an Input layer, hence should not be called
initialized_layer = LAYERS[lyr_name](*args, **lyr_config)
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,
'tf_name': lyr_name}
input_lyrs.append(initialized_layer)
else:
# it is executable and uses previous outputs as inputs
if lyr_name in ACTIVATION_LAYERS:
layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])
initiated_layers[lyr_config['name']] = {'layer': layer_outputs,
'named_outs': named_outs,
'call_args': call_args,
'inputs': lyr_inputs,
'tf_name': lyr_name}
elif lyr_name in ['TimeDistributed', 'Bidirectional']:
wrp_layer = LAYERS[lyr_name]
# because wrapper layer name is property
initiated_layers[lyr_config['name']] = {'layer': wrp_layer,
'tf_name': lyr_name}
continue
elif "LAMBDA" in lyr_name.upper():
# lyr_config is serialized lambda layer, which needs to be deserialized
initialized_layer = tf.keras.layers.deserialize(lyr_config)
# layers_config['lambda']['config'] still contains the lambda, so we need to replace the python
# object (lambda) with the serialized version (lyr_config) so that it can be saved as a json file.
layers_config[lyr]['config'] = lyr_config
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,
'named_outs': named_outs,
'call_args': call_args,
'inputs': lyr_inputs,
'tf_name': lyr_name}
else:
if wrp_layer is not None:
initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,
'named_outs': named_outs,
'call_args': call_args,
'inputs': lyr_inputs,
'tf_name': lyr_name}
wrp_layer = None
else:
if lyr_name == "TemporalFusionTransformer":
lyr_config['return_attention_components'] = True
initialized_layer = LAYERS[lyr_name](*args, **lyr_config)
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,
'named_outs': named_outs,
'call_args': call_args,
'inputs': lyr_inputs,
'tf_name': lyr_name}
else: # The inputs to this layer have been specified so they must exist in lyr_cache.
# it is an executable
if lyr_name in ACTIVATION_LAYERS:
layer_outputs = ACTIVATION_LAYERS[lyr_name](name=lyr_config['name'])
initiated_layers[lyr_config['name']] = {'layer': layer_outputs,
'named_outs': named_outs,
'call_args': call_args,
'inputs': lyr_inputs,
'tf_name': lyr_name}
elif lyr_name in ['TimeDistributed', 'Bidirectional']:
wrp_layer = LAYERS[lyr_name]
# because wrapper layer name is property
initiated_layers[lyr_config['name']] = {'layer': wrp_layer,
'tf_name': lyr_name}
continue
elif "LAMBDA" in lyr_name.upper():
initialized_layer = tf.keras.layers.deserialize(lyr_config)
layers_config[lyr]['config'] = lyr_config
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,
'named_outs': named_outs,
'call_args': call_args,
'inputs': lyr_inputs,
'tf_name': lyr_name}
else:
if wrp_layer is not None:
initialized_layer = wrp_layer(LAYERS[lyr_name](*args, **lyr_config))
initiated_layers[lyr_config['name']] = {'layer': initialized_layer,
'named_outs': named_outs,
'call_args': call_args,
'inputs': lyr_inputs,
'tf_name': lyr_name}
wrp_layer = None
else:
layer_initialized = LAYERS[lyr_name](*args, **lyr_config)
initiated_layers[lyr_config['name']] = {'layer': layer_initialized,
'named_outs': named_outs,
'call_args': call_args,
'inputs': lyr_inputs,
'tf_name': lyr_name}
if activation is not None: # put the string back to dictionary to be saved in config file
lyr_config['activation'] = activation
first_layer = False
self.jsonize_lyr_config(lyr_config)
# inputs = [] todo, identify input layers
# for k,v in lyr_cache.items():
# since the model is not built yet and we have access to only output tensors of each layer, this is probably
# # the only way to know how many `Input` layers were encountered during the run of this method. Each
# tensor (except TimeDistributed) has .op.inputs attribute, which is empty if a tensor represents output of Input layer.
# if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:
# if k.upper() != "TIMEDISTRIBUTED" and hasattr(v, 'op'):
# if hasattr(v.op, 'inputs'):
# _ins = v.op.inputs
# if len(_ins) == 0:
# inputs.append(v)
# else: # not sure if this is the proper way of checking if a layer receives an input or not!
# if hasattr(v, '_keras_mask'):
# inputs.append(v)
setattr(self, 'initiated_layers', initiated_layers)
setattr(self, 'input_lyrs', input_lyrs)
# todo,
# # for case when {Input -> Dense, Input_1}, this method wrongly makes Input_1 as output so in such case use
# # {Input_1, Input -> Dense }, thus it makes Dense as output and first 2 as inputs, so throwing warning
# if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:
# if len(layer_outputs.op.inputs) < 1:
# print("Warning: the output is of Input tensor class type")
# else:
# if 'op' not in dir(layer_outputs): # layer_outputs does not have `op`, which means it has no incoming node
# print("Warning: the output is of Input tensor class type")
# outs = None
#if BACKEND == 'tensorflow':
# outs = self.call(input_lyrs)
# setattr(self, 'output_lyrs', outs)
# if BACKEND == 'tensorflow':
# ## Reinitial
# super(Model, self).__init__(
# inputs=input_lyrs,
# outputs=outs)
#MODEL.__init__(self, inputs=inputs, outputs=outs)
return input_lyrs # , outs
def call(self, inputs, training=None, mask=None, run_call=True):
version = ''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')
return getattr(self, f'call_{version}')(inputs, training, mask, run_call=run_call)
def call_260(self, *args, **kwargs):
return self.call_250(*args, **kwargs)
def call_270(self, *args, **kwargs):
return self.call_250(*args, **kwargs)
def call_290(self, *args, **kwargs):
return self.call_250(*args, **kwargs)
def call_280(self, *args, **kwargs):
return self.call_250(*args, **kwargs)
def call_200(self, *args, **kwargs):
return self.call_210(*args, **kwargs)
def call_250(self, inputs, training=None, mask=None, run_call=True):
self.treat_casted_inputs(inputs)
outs = inputs
# inputs can be a list of tensors
if isinstance(inputs, list) or isinstance(inputs, tuple) or inputs.__class__.__name__ == "ListWrapper":
cache = {getattr(i, '__dummy_name').split(':')[0]:i for i in inputs}
# if inputs is a dictionary, then just use it as the cache
elif isinstance(inputs, dict):
cache = inputs
elif isinstance(inputs, tuple):
if len(inputs) == 1:
inputs, = inputs
cache = {inputs.name.split(':')[0] : inputs}
else:
cache = {i.name.split(':')[0]: i for i in inputs}
# hopefully this is just one tensor
else:
cache = {getattr(inputs, '__dummy_name').split(':')[0]: inputs}
# todo keep tensor cache and layers cache separate
input_tensor = False
for idx, (lyr, lyr_args) in enumerate(self.initiated_layers.items()):
if isinstance(lyr_args['layer'], tf.Tensor) or idx == 0 or is_input(lyr_args['layer']):
# this must be an input layer
# assert is_input(lyr_args['layer'])
if isinstance(inputs, list):
assert all([is_input(_input) for _input in inputs]), inputs
if isinstance(inputs, tuple):
if not run_call:
assert all([is_input(_input) for _input in inputs])
# else:
# assert is_input(inputs)
input_tensor = True
# don't use the tf.keras.Input from self.initiated_layers
# outs = lyr_args['layer']
elif lyr_args['tf_name'] in ['TimeDistributed', 'Bidirectional']:
# no need to call wrapper layer so move to next iteration
continue
else:
_inputs = lyr_args.get('inputs', None)
# inputs have not been explicitly defined by the user so just
# use previous output
if _inputs is None:
_inputs = prev_output_name
if idx == 1 and _inputs not in cache:
call_args, add_args = inputs, {}
else:
call_args, add_args = get_call_args(
_inputs,
cache, lyr_args['call_args'], lyr)
# call the initiated layer
if lyr in MULTI_INPUT_LAYERS:
outs = lyr_args['layer'](*call_args, **add_args)
else:
outs = lyr_args['layer'](call_args, **add_args)
# if the layer is TFT, we need to extract the attention components
# so that they can be used during post-processing
if lyr in ["TemporalFusionTransformer", "TFT"]:
outs, self.TemporalFusionTransformer_attentions = outs
self._maybe_handle_multi_outs(lyr_args, cache, outs)
if input_tensor:
input_tensor = False
else:
cache[lyr] = outs
prev_output_name = lyr
return outs
def call_210(self, inputs, training=True, mask=None, run_call=True):
if int(''.join(np.__version__.split('.')[0:2]).ljust(3, '0')) >= 120:
raise NumpyVersionException("Decrease")
self.treat_casted_inputs(inputs)
outs = inputs
# inputs can be a list of tensors
if isinstance(inputs, list) or isinstance(inputs, tuple) or inputs.__class__.__name__ == "ListWrapper":
cache = {getattr(i, '__dummy_name').split(':')[0]:i for i in inputs}
# if inputs is a dictionary, then just use it as the cache
elif isinstance(inputs, dict):
cache = inputs
# hopefully this is just one tensor
else:
cache = {getattr(inputs, '__dummy_name').split(':')[0]: inputs}
is_input_tensor = False
for idx, (lyr, lyr_args) in enumerate(self.initiated_layers.items()):
lyr = lyr.split(':')[0]
if isinstance(lyr_args['layer'], tf.Tensor) or idx == 0 or is_input(lyr_args['layer']):
is_input_tensor = True
# this must be an input layer
# assert is_input(lyr_args['layer'])
if isinstance(inputs, list):
if not run_call:
assert all([is_input(_input) for _input in inputs])
if isinstance(inputs, tuple):
assert all([is_input(_input) for _input in inputs])
elif lyr_args['tf_name'] in ['TimeDistributed', 'Bidirectional']:
# no need to call wrapper layer so move to next iteration
continue
else:
_inputs = lyr_args.get('inputs', None)
# inputs have not been explicitly defined by the user so just use previous output
if _inputs is None:
_inputs = prev_output_name
if idx == 1 and _inputs not in cache:
call_args, add_args = inputs, {}
else:
call_args, add_args = get_call_args(_inputs, cache, lyr_args['call_args'], lyr)
# call the initiated layer
outs = lyr_args['layer'](call_args, **add_args)
# if the layer is TFT, we need to extract the attention components
# so that they can be used during post-processing
if lyr in ["TemporalFusionTransformer", "TFT"]:
outs, self.TemporalFusionTransformer_attentions = outs
self._maybe_handle_multi_outs(lyr_args, cache, outs)
if is_input_tensor:
is_input_tensor = False
else:
cache[lyr] = outs
prev_output_name = lyr
return outs
@staticmethod
def _maybe_handle_multi_outs(lyr_args, cache, outs):
if lyr_args['named_outs'] is not None:
if isinstance(outs, (list, tuple)):
# the layer gives multiple outputs either as list/tuple
if len(lyr_args['named_outs']) == len(outs):
for name, out_tensor in zip(lyr_args['named_outs'], outs):
cache[name] = out_tensor
else:
# even though the layer gives multiple outputs
# but the user has assigned it to single variable
cache[lyr_args['named_outs']] = outs
else:
cache[lyr_args['named_outs']] = outs
return
def treat_casted_inputs(self, casted_inputs):
if isinstance(casted_inputs, tuple) or isinstance(casted_inputs, list):
for in_tensor, orig_in_name in zip(casted_inputs, self.input_layer_names):
assign_dummy_name(in_tensor, orig_in_name)
elif isinstance(casted_inputs, dict):
names_to_assign = self.input_layer_names
if isinstance(names_to_assign, list):
assert len(names_to_assign) == len(casted_inputs)
for new_name, (_input, in_tensor) in zip(names_to_assign, casted_inputs.items()):
assign_dummy_name(in_tensor, new_name)
else:
raise ValueError
else:
name_to_assign = self.input_layer_names
if isinstance(name_to_assign, list):
if len(name_to_assign) == 1:
name_to_assign = name_to_assign[0]
else:
raise ValueError
assign_dummy_name(casted_inputs, name_to_assign)
return
def call_115(self, inputs, training=None, mask=None, run_call=True):
outs = inputs
# inputs can be a list of tensors
if isinstance(inputs, list):
cache = {i.name.split(':')[0]: i for i in inputs}
# if inputs is a dictionary, then just use it as the cache
elif isinstance(inputs, dict):
cache = inputs
# inputs can be a list of tensors but as a ListWrapper
elif inputs.__class__.__name__ == "ListWrapper":
cache = {i.name.split(':')[0]: i for i in inputs}
elif isinstance(inputs, tuple):
cache = {i.name.split(':')[0]: i for i in inputs}
# hopefully this is just one tensor
else:
cache = {inputs.name.split(':')[0]: inputs}
# todo keep tensor cache and layers cache separate
input_tensor = False
for idx, (lyr, lyr_args) in enumerate(self.initiated_layers.items()):
lyr = lyr.split(':')[0] # todo, this should not have been added
if isinstance(lyr_args['layer'], tf.Tensor) or idx == 0 or is_input(lyr_args['layer']):
# this must be an input layer
# assert is_input(lyr_args['layer'])
if isinstance(inputs, list):
assert all([is_input(_input) for _input in inputs])
if isinstance(inputs, tuple):
assert all([is_input(_input) for _input in inputs])
# else:
# assert is_input(inputs)
input_tensor = True
# don't use the tf.keras.Input from self.initiated_layers
elif lyr_args['tf_name'] in ['TimeDistributed', 'Bidirectional']:
# no need to call wrapper layer so move to next iteration
continue
else:
_inputs = lyr_args.get('inputs', None)
# inputs have not been explicitly defined by the user so just use previous output
if _inputs is None:
_inputs = prev_output_name
call_args, add_args = get_call_args(_inputs, cache, lyr_args['call_args'], lyr)
# call the initiated layer
outs = lyr_args['layer'](call_args, **add_args)
# if the layer is TFT, we need to extract the attention components
# so that they can be used during post-processing
if lyr in ["TemporalFusionTransformer", "TFT"]:
outs, self.TemporalFusionTransformer_attentions = outs
self._maybe_handle_multi_outs(lyr_args, cache, outs)
if input_tensor:
input_tensor = False # cache[_tensor.name] = _tensor
else:
cache[lyr] = outs
prev_output_name = lyr
return outs
def forward(self, *inputs: Any, **kwargs: Any):
"""implements forward pass implementation for pytorch based NN models."""
outs = inputs
# if inputs is a dictionary, then just use it as the cache
if isinstance(inputs, dict):
cache = inputs
# inputs can be a list of tensors but as a ListWrapper
elif inputs.__class__.__name__ == "ListWrapper":
cache = {i.name.split(':')[0]: i for i in inputs}
elif isinstance(inputs, tuple) or isinstance(inputs, list):
cache = {}
for idx, i in enumerate(inputs):
_name = i.name if i.name is not None else f'input_{idx}'
cache[_name] = i
# hopefully this is just one tensor
else:
cache = {inputs.name: inputs}
for idx, (lyr_name, lyr_args) in enumerate(self.initiated_layers.items()):
lyr = lyr_args['layer']
named_outs = lyr_args['named_outs']
call_args = lyr_args['call_args']
_inputs = lyr_args['inputs']
if idx == 0:
assert isinstance(inputs, tuple) and len(inputs) == 1
_inputs = 'input_0'
if _inputs is None:
_inputs = prev_output_name
call_args, add_args = get_call_args(_inputs, cache, call_args, lyr_name)
# actual call
outs = lyr(call_args, **add_args)
if named_outs is not None:
if isinstance(outs, list):
assert len(named_outs) == len(outs)
for name, out_tensor in zip(named_outs, outs):
if name in cache:
raise ValueError(f"Duplicate layer found with name {name}")
cache[name] = out_tensor
if isinstance(outs, tuple):
assert len(named_outs) == len(outs)
for name, out_tensor in zip(named_outs, outs):
if name in cache:
raise ValueError(f"Duplicate layer found with name {name}")
cache[name] = out_tensor
else:
cache[named_outs] = outs
cache[lyr_name] = outs
inputs = outs
prev_output_name = lyr_name
return outs
def build(self, input_shape):
self.print_info()
if self.category == "DL" and K.BACKEND == 'tensorflow':
# Initialize the graph
self._is_graph_network = True
self._init_graph_network(
inputs=self._input_lyrs(),
outputs=self.output_lyrs
)
super().compile(
loss=self.loss(), optimizer=self.get_optimizer(), metrics=self.get_metrics())
self.info['model_parameters'] = self.trainable_parameters()
if self.verbosity > 0:
if 'tcn' in self.config['model']['layers']:
if not hasattr(self, '_layers'):
setattr(self, '_layers', self.layers)
tcn.tcn_full_summary(self, expand_residual_blocks=True)
else:
self.summary()
if self.verbosity >= 0: # if verbosity is -ve then don't plot this
self.plot_model(self)
elif self.category == "ML":
self.build_ml_model()
if not getattr(self, 'from_check_point', False) and self.verbosity>=0:
# fit may fail so better to save config before as well. This will be overwritten once the fit is complete
self.save_config()
self.update_info()
return
def first_layer_shape(self):
""" instead of tuple, returning a list so that it can be moified if needed"""
if K.BACKEND == 'pytorch':
if self.lookback == 1:
return [-1, self.num_ins]
else:
return [-1, self.lookback, self.num_ins]
if self.num_input_layers > 1:
shapes = {}
for lyr in self.inputs:
shapes[lyr.name] = lyr.shape
return shapes
shape = []
for idx, d in enumerate(self.nn_layers()[0].input.shape):
if int(tf.__version__[0]) == 1:
if isinstance(d, tf.Dimension): # for tf 1.x
d = d.value
if idx == 0: # the first dimension must remain undefined so that the user may define batch_size
d = -1
shape.append(d)
return shape
def fit(self, *args, **kwargs):
# this function is necessary here so that self.fit does not directly call keras.Model.fit
# we need to pre-process the data before feeding it to keras.fit
return self.call_fit(*args, **kwargs)
def evaluate(self, *args, **kwargs):
return self.call_evaluate(*args, **kwargs)
def predict(self,
*args, **kwargs
):
return self.call_predict(*args, **kwargs)
def loss_name(self):
return self.loss
@classmethod
def from_config(cls, *args, **kwargs):
"""This method primarily behaves like `from_config` of BaseModel. However,
it can also be used like `from_config` of the underlying Model such as
`from_config` of tf.keras.Model.
# todo test from_config with keras
"""
_config = args
if isinstance(args, tuple): # multiple non-keyword arguments were provided
if len(args) > 0:
_config = args[0]
else:
_config = kwargs['config_path']
kwargs.pop('config_path')
local = False
if 'make_new_path' in kwargs:
local = True
elif isinstance(_config, str) and os.path.isfile(_config):
local = True
elif isinstance(_config, dict) and "category" in _config:
local = True
if local:
config = None
config_path = None
# we need to build ai4water's Model class
if isinstance(_config, dict):
config = _config
else:
config_path = _config
return BaseModel._get_config_and_path(
cls,
config=config,
config_path=config_path,
**kwargs
)
# tf1.15 has from_config so call it
return super().from_config(*args, **kwargs)
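# Hedged usage sketch for `from_config`; the path and keyword argument below are
# illustrative only and assume a config file previously written by `save_config`:
#   >>> model = Model.from_config("path/to/config.json", make_new_path=True)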
def fit_pytorch(self, x, **kwargs):
"""Trains the pytorch model."""
history = self.torch_learner.fit(x, **kwargs)
setattr(self, 'history', history)
return history
def predict_pytorch(self, x, **kwargs):
from .models._torch.utils import to_torch_dataset
from torch.utils.data import DataLoader
if isinstance(x, torch.utils.data.Dataset):
dataset = x
elif isinstance(x, np.ndarray):
dataset = to_torch_dataset(x=x)
elif isinstance(x, list) and len(x) == 1:
dataset = to_torch_dataset(x[0])
else:
raise ValueError
data_loader = DataLoader(dataset, batch_size=self.config['batch_size'])
predictions = []
for i, batch_x in enumerate(data_loader):
y_pred_ = self(batch_x.float())
predictions.append(y_pred_.detach().numpy())
return np.concatenate(predictions, axis=0)
def is_input(tensor, name=''):
_is_input = False
if int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 240:
# Each tensor (except TimeDistributed) has .op.inputs attribute, which is empty
# if a tensor represents output of Input layer.
if name != "TimeDistributed" and hasattr(tensor, 'op'):
if hasattr(tensor.op, 'inputs'):
_ins = tensor.op.inputs
if len(_ins) == 0:
_is_input = True
# # not sure if this is the proper way of checking if a layer receives an input or not!
elif hasattr(tensor, '_keras_mask'):
_is_input = True
return _is_input
class NumpyVersionException(Exception):
def __init__(self, action):
self.action = action
super().__init__(self.msg())
def msg(self):
return f"""
version {np.__version__} of numpy is not compatible with tf version {tf.__version__}
{self.action} numpy version."""
def assign_dummy_name(tensor, dummy_name):
if tf.executing_eagerly():
setattr(tensor, '__dummy_name', dummy_name)
else:
if "CAST" in tensor.name.upper() or "IteratorGetNext" in tensor.name or len(getattr(tensor, '_consumers', [1]))==0:
setattr(tensor, '__dummy_name', dummy_name)
print(f"assigning name {dummy_name} to {tensor.name} with shape {getattr(tensor, 'shape', None)}")
else:
setattr(tensor, '__dummy_name', tensor.name)
return

# ===== end of ai4water/main.py =====
from collections import OrderedDict
from .backend import tf, keras
if keras is not None:
K = keras.backend
Sequential = tf.keras.Sequential
Model = tf.keras.models.Model
else:
K, Sequential, Model = None, None, None
def is_placeholder(n):
return (hasattr(n, '_op') and n._op.type == 'Placeholder') or '_input' in str(n) or 'input' in str(n)
def n_(node, output_format, nested=False, module=None):
if isinstance(node, list):
node_name = '_'.join([str(n.name) for n in node])
else:
node_name = str(node.name)
if module is not None and nested:
node_name = module.name + '/' + node_name
if output_format == 'simple' and ':' in node_name:
return node_name.split(':')[0]
elif output_format == 'full' and hasattr(node, 'output'):
return node.output.name
return node_name
def _evaluate(model: Model, nodes_to_evaluate, x, y=None, auto_compile=False):
if not model._is_compiled:
if auto_compile:
model.compile(loss='mse', optimizer='adam')
else:
raise Exception('Compilation of the model required.')
def eval_fn(k_inputs):
try:
return K.function(k_inputs, nodes_to_evaluate)(model._standardize_user_data(x, y))
except AttributeError: # one way to avoid forcing non eager mode.
if y is None: # tf 2.3.0 upgrade compatibility.
return K.function(k_inputs, nodes_to_evaluate)(x)
return K.function(k_inputs, nodes_to_evaluate)((x, y)) # although works.
except ValueError as e:
print('Run it without eager mode tf.compat.v1.disable_eager_execution()')
raise e
try:
return eval_fn(model._feed_inputs + model._feed_targets + model._feed_sample_weights)
except Exception:
return eval_fn(model._feed_inputs)
def get_gradients_of_trainable_weights(model, x, y):
"""
Get the gradients of trainable_weights for the kernel and the bias nodes for all filters in each layer.
Trainable_weights gradients are averaged over the minibatch.
:param model: keras compiled model
:param x: inputs for which gradients are sought (averaged over all inputs if batch_size > 1)
:param y: outputs for which gradients are sought
:return: dict mapping layers to corresponding gradients (filter_h, filter_w, in_channels, out_channels)
"""
nodes = OrderedDict([(n.name, n) for n in model.trainable_weights])
return _get_gradients(model, x, y, nodes)
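# Hedged usage sketch (assumes `m` is a compiled keras model and x, y are numpy arrays;
# eager execution may need to be disabled first, as _get_gradients below points out):
#   >>> grads = get_gradients_of_trainable_weights(m, x, y)
#   >>> list(grads.keys())   # names of trainable weights, e.g. 'dense/kernel:0'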
def get_gradients_of_activations(model, x, y, layer_names=None, output_format='simple', nested=False):
"""
Get gradients of the outputs of the activation functions, regarding the loss.
Intuitively, it shows how your activation maps change over a tiny modification of the loss.
:param model: keras compiled model or one of ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2',
'mobilenet_v2', 'mobilenetv2'].
:param x: Model input (Numpy array). In the case of multi-inputs, x should be of type List.
:param y: Model target (Numpy array). In the case of multi-inputs, y should be of type List.
:param layer_names: (optional) Single name of a layer or list of layer names for which activations should be
returned. It is useful in very big networks when it is computationally expensive to evaluate all the layers/nodes.
:param output_format: Change the output dictionary key of the function.
- 'simple': output key will match the names of the Keras layers. For example Dense(1, name='d1') will
return {'d1': ...}.
- 'full': output key will match the full name of the output layer name. In the example above, it will
return {'d1/BiasAdd:0': ...}.
- 'numbered': output key will be an index range, based on the order of definition of each layer within the model.
:param nested: (optional) If set, will move recursively through the model definition to retrieve nested layers.
Recursion ends at leaf layers of the model tree or at layers with their name specified in layer_names.
E.g., a model with the following structure
-layer1
-conv1
...
-fc1
-layer2
-fc2
... yields a dictionary with keys 'layer1/conv1', ..., 'layer1/fc1', 'layer2/fc2'.
If layer_names = ['layer2/fc2'] is specified, the dictionary will only hold one key 'layer2/fc2'.
The layer names are generated by joining all layers from top level to leaf level with the separator '/'.
:return: Dict {layer_names (specified by output_format) -> activation of the layer output/node (Numpy array)}.
"""
nodes = OrderedDict()
_get_nodes(model, nodes, output_format, nested=nested, layer_names=layer_names)
return _get_gradients(model, x, y, nodes)
def _get_gradients(model, x, y, nodes):
if model.optimizer is None:
raise Exception('Please compile the model first. The loss function is required to compute the gradients.')
nodes_names = nodes.keys()
nodes_values = nodes.values()
try:
if not hasattr(model, 'total_loss'):
raise Exception('Disable TF eager mode to use get_gradients.\n'
'Add this command at the beginning of your script:\n'
'tf.compat.v1.disable_eager_execution()')
grads = model.optimizer.get_gradients(model.total_loss, nodes_values)
except ValueError as e:
if 'differentiable' in str(e):
# Probably one of the gradients operations is not differentiable...
grads = []
differentiable_nodes = []
for n in nodes_values:
try:
grads.extend(model.optimizer.get_gradients(model.total_loss, n))
differentiable_nodes.append(n)
except ValueError:
pass
# nodes_values = differentiable_nodes
else:
raise e
gradients_values = _evaluate(model, grads, x, y)
return OrderedDict(zip(nodes_names, gradients_values))
def _get_nodes(module, nodes, output_format, nested=False, layer_names=None, depth=0):
def update_node(n):
is_node_a_model = isinstance(n, (Model, Sequential))
if not is_placeholder(n):
if is_node_a_model and nested:
return
try:
mod = None if depth == 0 else module
name = n_(n, output_format, nested, mod)
if layer_names is None or name in layer_names:
if is_node_a_model:
if hasattr(n, '_layers'):
output = n._layers[-1].output
else:
output = n.layers[-1].output
else:
output = n.output
nodes.update({name: output})
except AttributeError:
pass
try:
layers = module._layers if hasattr(module, '_layers') else module.layers
except AttributeError:
return
for layer in layers:
update_node(layer)
if nested:
_get_nodes(layer, nodes, output_format, nested, layer_names, depth + 1)
def get_activations(model, x, layer_names=None, nodes_to_evaluate=None,
output_format='simple', nested=False, auto_compile=True):
"""
Fetch activations (nodes/layers outputs as Numpy arrays) for a Keras model and an input X.
By default, all the activations for all the layers are returned.
:param model: Keras compiled model or one of ['vgg16', 'vgg19', 'inception_v3', 'inception_resnet_v2',
'mobilenet_v2', 'mobilenetv2', ...].
:param x: Model input (Numpy array). In the case of multi-inputs, x should be of type List.
:param layer_names: (optional) Single name of a layer or list of layer names for which activations should be
returned. It is useful in very big networks when it is computationally expensive to evaluate all the layers/nodes.
:param nodes_to_evaluate: (optional) List of Keras nodes to be evaluated. Useful when the nodes are not
in model.layers.
:param output_format: Change the output dictionary key of the function.
- 'simple': output key will match the names of the Keras layers. For example Dense(1, name='d1') will
return {'d1': ...}.
- 'full': output key will match the full name of the output layer name. In the example above, it will
return {'d1/BiasAdd:0': ...}.
- 'numbered': output key will be an index range, based on the order of definition of each layer within the model.
:param nested: If specified, will move recursively through the model definition to retrieve nested layers.
Recursion ends at leaf layers of the model tree or at layers with their name specified in layer_names.
E.g., a model with the following structure
-layer1
-conv1
...
-fc1
-layer2
-fc2
... yields a dictionary with keys 'layer1/conv1', ..., 'layer1/fc1', 'layer2/fc2'.
If layer_names = ['layer2/fc2'] is specified, the dictionary will only hold one key 'layer2/fc2'.
The layer names are generated by joining all layers from top level to leaf level with the separator '/'.
:param auto_compile: If set to True, will auto-compile the model if needed.
:return: Dict {layer_name (specified by output_format) -> activation of the layer output/node (Numpy array)}.
"""
layer_names = [layer_names] if isinstance(layer_names, str) else layer_names
# print('Layer names:', layer_names)
nodes = OrderedDict()
if nodes_to_evaluate is None:
_get_nodes(model, nodes, output_format, nested, layer_names)
else:
if layer_names is not None:
raise ValueError('Do not specify a [layer_name] with [nodes_to_evaluate]. It will not be used.')
nodes = OrderedDict([(n_(node, 'full'), node) for node in nodes_to_evaluate])
if len(nodes) == 0:
if layer_names is not None:
network_layers = ', '.join([layer.name for layer in model.layers])
raise KeyError('Could not find a layer with name: [{}]. '
'Network layers are [{}]'.format(', '.join(layer_names), network_layers))
else:
raise ValueError('Nodes list is empty. Or maybe the model is empty.')
# The placeholders are processed later (Inputs node in Keras). Due to a small bug in tensorflow.
input_layer_outputs = []
layer_outputs = OrderedDict()
for key, node in nodes.items():
if isinstance(node, list):
for nod in node:
if not is_placeholder(nod):
if key not in layer_outputs:
layer_outputs[key] = []
layer_outputs[key].append(nod)
else:
if not is_placeholder(node):
layer_outputs.update({key: node})
if nodes_to_evaluate is None or (layer_names is not None) and \
any([n.name in layer_names for n in model.inputs]):
input_layer_outputs = list(model.inputs)
if len(layer_outputs) > 0:
activations = _evaluate(model, layer_outputs.values(), x, y=None, auto_compile=auto_compile)
else:
activations = {}
def craft_output(output_format_):
inputs = [x] if not isinstance(x, list) else x
activations_inputs_dict = OrderedDict(
zip([n_(output, output_format_) for output in input_layer_outputs], inputs))
activations_dict = OrderedDict(zip(layer_outputs.keys(), activations))
result_ = activations_inputs_dict.copy()
result_.update(activations_dict)
if output_format_ == 'numbered':
result_ = OrderedDict([(i, v) for i, (k, v) in enumerate(result_.items())])
return result_
result = craft_output(output_format)
if layer_names is not None: # extra check.
result = {k: v for k, v in result.items() if k in layer_names}
if nodes_to_evaluate is not None and len(result) != len(nodes_to_evaluate):
result = craft_output(output_format_='full') # collision detected in the keys.
return result

# ===== end of ai4water/keract_mod.py =====
__all__ = ["NN", "check_act_fn"]
from weakref import WeakKeyDictionary
try:
from ai4water.tf_attributes import ACTIVATION_LAYERS, ACTIVATION_FNS, LAYERS, tf
except ModuleNotFoundError:
tf = None
from . import backend as K
if K.BACKEND == 'tensorflow':
from ai4water.tf_attributes import LAYERS, tf
else:
try: # maybe torch is also not available.
from .models._torch import LAYERS
except (ModuleNotFoundError, ImportError):
LAYERS = {}
np = K.np
class AttributeNotSetYet:
def __init__(self, func_name):
self.data = WeakKeyDictionary()
self.func_name = func_name
def __get__(self, instance, owner):
raise AttributeError("run the function {} first to get {}".format(
self.func_name, self.name))
def __set_name__(self, owner, name):
self.name = name
class AttributeStore(object):
"""A class which will just make sure that attributes are set at its childs
class level and not here. It's purpose is just to avoid cluttering of __init__
method of its child classes. """
def __init__(self):
self._model = AttributeNotSetYet("`build` to build neural network")
self.method = None
self.en_densor_We = None
self.en_LSTM_cell = None
self.auto_enc_composite = None
self.de_LSTM_cell = None
self.de_densor_We = None
self.scalers = {}
self.is_training = False
class NN(AttributeStore):
def __init__(self,
config: dict
):
self.config = config
self.lookback = self.config['ts_args']['lookback']
AttributeStore.__init__(self)
@property
def lookback(self):
return self._lookback
@lookback.setter
def lookback(self, x):
self._lookback = x
def update_cache(self, cache: dict, key, value):
if key in cache:
raise ValueError("Duplicate input/output name found. The name '{}'"
" already exists as input/output for another layer"
.format(key))
cache[key] = value
return
def deconstruct_lyr_args(self, lyr_name, lyr_args) -> tuple:
if not isinstance(lyr_args, dict):
return lyr_args, None, None, None
if callable(lyr_name):
if hasattr(lyr_name, '__call__'):
raise ValueError
else:
config = tf.keras.layers.Lambda(lambda x: lyr_name(x))
inputs = lyr_args['inputs'] if 'inputs' in lyr_args else None
outputs = lyr_args['outputs'] if 'outputs' in lyr_args else None
call_args = lyr_args['call_args'] if 'call_args' in lyr_args else None
elif 'config' not in lyr_args:
if all([arg not in lyr_args for arg in ['inputs', 'outputs', 'call_args']]):
config = lyr_args
inputs = None
outputs = None
call_args = None
else: # todo, why we can't have inputs/outputs/call_args without config?
raise ValueError(f"No config found for layer '{lyr_name}'")
else:
config = lyr_args['config']
inputs = lyr_args['inputs'] if 'inputs' in lyr_args else None
outputs = lyr_args['outputs'] if 'outputs' in lyr_args else None
call_args = lyr_args['call_args'] if 'call_args' in lyr_args else None
if tf is not None:
if isinstance(config, tf.keras.layers.Lambda):
config = tf.keras.layers.serialize(config)
return config, inputs, outputs, call_args
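# Hedged examples of the layer-argument forms this method accepts (keys/values are illustrative):
#   {"units": 64}                                    -> config={"units": 64}, inputs/outputs/call_args are None
#   {"config": {"units": 64}, "inputs": "my_inputs"} -> config={"units": 64}, inputs="my_inputs"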
def check_lyr_config(self, lyr_name: str, config: dict):
if callable(lyr_name): # lyr_name is class
if not isinstance(config, dict):
config = {}
args = [config]
else:
args = {}
if 'name' not in config and K.BACKEND != 'pytorch':
config['name'] = lyr_name.__name__
config, activation = check_act_fn(config)
return lyr_name.__name__, args, config, activation
if callable(config):
return lyr_name, [], config, None
elif not isinstance(config, dict):
args = [config]
config = {}
else:
args = []
if 'name' not in config and K.BACKEND != 'pytorch':
config['name'] = lyr_name
# for reproducibility, dropout seed should be fixed if available in self.config
if "Dropout" in lyr_name and K.BACKEND != 'pytorch':
if 'seed' not in config:
config['seed'] = self.config['seed']
activation = None
if "LAMBDA" not in lyr_name.upper():
# for lambda layers, we don't need to check activation functions and layer names.
config, activation = check_act_fn(config)
# get keras/tensorflow layer compatible layer name
lyr_name = self.get_layer_name(lyr_name)
return lyr_name, args, config, activation
def get_layer_name(self, lyr: str) -> str:
layer_name = lyr.split('_')[0]
if layer_name not in list(LAYERS.keys()) + list(ACTIVATION_LAYERS.keys()):
raise ValueError(f"""
The layer name '{lyr}' you specified does not exist.
Is this a user-defined layer? If so, make sure your
layer is being considered by AI4Water
""")
return layer_name
def get_and_set_attrs(self, layer):
# check the type without importing the layer
if layer.__class__.__name__ == "TemporalFusionTransformer":
# use layer name as there can be more than one layers from same class.
setattr(self, f'{layer.name}_attentions', layer.attention_components)
return
def get_self_attention_weights(self, inputs, **kwargs)->dict:
"""returns Softmax activations which is used inside SelfAttention layer.
:raises ValueError
if not SelfAttention layer is found.
"""
return self.get_intermediate_output("SelfAttention", inputs, keep=1, **kwargs)
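# Hedged usage sketch (assumes the network was declared with a SelfAttention layer
# and that `x` is valid model input):
#   >>> attention_weights = model.get_self_attention_weights(x)   # {layer_name: softmax weights}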
def get_attention_lstm_weights(self, inputs, **kwargs)->dict:
"""returns Softmax activations which is inside SelfAttention layer.
This function should be called only when AttentionLSTM layer is used.
"""
return self.get_intermediate_output("self_attention", inputs, keep=1, **kwargs)
def get_fttransformer_weights(self, inputs, **kwargs):
"""
Returns attention weights for FTTransformer model which have shape of
(num_samples, num_input_features)
"""
weights = self.get_intermediate_output('multi_head_attention', inputs, **kwargs)
if self.verbosity>0:
print(f"found {len(weights)} layers with multi-head-attention weights named {weights.keys()}")
importances = []
for k, v in weights.items():
out, importance = v
importance = importance[:, :, 0, :]
importances.append(np.sum(importance, axis=1))
depth = self.config['model']['layers']['FTTransformer']['config']['depth']
heads = self.config['model']['layers']['FTTransformer']['config']['num_heads']
return np.sum(np.stack(importances), axis=0) / (depth * heads)
def get_intermediate_output(self, layer_name, inputs, keep=None, **kwargs)->dict:
"""keep is only useful when the intermediate layer returns more than 1 output"""
new_outs = {}
for lyr in self.layers:
if layer_name in lyr.name:
int_outputs = lyr.output
if keep is not None:
assert isinstance(int_outputs, (list, tuple))
int_outputs = int_outputs[keep]
new_outs[lyr.name] = int_outputs
if len(new_outs)==0:
raise ValueError(f"No {layer_name} layer found in Model")
new_model = tf.keras.models.Model(self.inputs, new_outs)
weights = new_model.predict(x=inputs, **kwargs)
if isinstance(weights, list):
# tensorflow 1 still returns a list even if new_outs is a dictionary
weights_dict = {}
for _name, weight in zip(new_outs.keys(), weights):
weights_dict[_name] = weight
return weights_dict
return weights
@staticmethod
def jsonize_lyr_config(lyr_config:dict):
"""some arguments in lyr_config dictionary may not be jsonizable.
Jsonizing them because we have already used them so now we can save them
in json file"""
if tf is None:
return
if isinstance(lyr_config, dict):
for key, val in lyr_config.items():
if isinstance(val, tf.DType):
lyr_config[key] = val.name
return lyr_config
def check_act_fn(config: dict):
""" it is possible that the config file does not have activation argument or
activation is None"""
activation = None
if 'activation' in config:
activation = config['activation']
if activation is not None:
assert isinstance(activation, str), f"unknown activation function {activation}"
config['activation'] = ACTIVATION_FNS[activation]
return config, activation
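# Hedged example (keys are illustrative):
#   >>> check_act_fn({'units': 8, 'activation': 'relu'})
#   ({'units': 8, 'activation': 'relu'}, 'relu')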
def get_call_args(lyr_inputs, lyr_cache, add_args, lyr_name):
""" gets the additional call arguments for a layer. It is supposed that the
call arguments are actually tensors/layers
that have been created so far in the model including input layer.
The call_args can be a list of inputs as well."""
if isinstance(lyr_inputs, list):
call_args = []
for lyr_ins in lyr_inputs:
if lyr_ins not in lyr_cache:
raise ValueError("""
No layer named '{}' currently exists in the model which can be fed
as input to '{}' layer. Available layers are {}.""".format(
lyr_ins, lyr_name, list(lyr_cache.keys())))
call_args.append(lyr_cache[lyr_ins])
else:
if lyr_inputs not in lyr_cache:
raise ValueError(f"""
No layer named '{lyr_inputs}' currently exists in the model which
can be fed as input to '{lyr_name}' layer. Available layers are
{list(lyr_cache.keys())}
""")
call_args = lyr_cache[lyr_inputs]
return call_args, get_add_call_args(add_args, lyr_cache, lyr_name)
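# Hedged example (layer names are illustrative): with lyr_cache = {'Input': inp, 'LSTM': lstm_out},
#   >>> get_call_args('LSTM', lyr_cache, None, 'Dense')
#   (lstm_out, {})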
def get_add_call_args(add_args, lyr_cache, lyr_name):
additional_args = {}
if add_args is not None:
assert isinstance(add_args, dict), """
call_args to layer '{}' must be provided as dictionary""".format(lyr_name)
for arg_name, arg_val in add_args.items():
if isinstance(arg_val, str):
if arg_val not in lyr_cache:
raise NotImplementedError("""
The value {} for additional call argument {} to '{}'
layer not understood""".format(arg_val, arg_name, lyr_name))
additional_args[arg_name] = lyr_cache[arg_val]
# the additional argument is a list of tensors, get all of them from lyr_cache
elif isinstance(arg_val, list):
add_arg_val_list = []
for arg in arg_val:
assert isinstance(arg, str)
add_arg_val_list.append(lyr_cache[arg])
additional_args[arg_name] = add_arg_val_list
elif isinstance(arg_val, bool) or arg_val is None:
additional_args[arg_name] = arg_val
else:
raise NotImplementedError("""
The value `{}` for additional call argument {} to '{}'
layer not understood""".format(arg_val, arg_name, lyr_name))
return additional_args

# ===== end of ai4water/nn_tools.py =====
__all__ = ["ACTIVATION_LAYERS", "ACTIVATION_FNS", "LOSSES",
"LAYERS", "OPTIMIZERS", "tcn", "MULTI_INPUT_LAYERS"]
# it is assumed that tf is available
from .backend import get_attributes, tf
try:
import tcn
except ModuleNotFoundError:
tcn = None
LOSSES = {}
LAYERS = {}
MULTI_INPUT_LAYERS = {"EALSTM"}
if tcn is not None:
LAYERS.update({"TCN": tcn.TCN})
try:
import atten_lstm
except ModuleNotFoundError:
atten_lstm = None
if atten_lstm is not None:
from atten_lstm import SelfAttention, AttentionLSTM
LAYERS.update({"SelfAttention": SelfAttention})
LAYERS.update({"AttentionLSTM": AttentionLSTM})
if tf is not None:
import ai4water.utils.tf_losses as tf_losses
from ai4water.models._tensorflow import NBeats
import ai4water.models._tensorflow.attention_layers as attns
from ai4water.models._tensorflow import TemporalFusionTransformer
keras = tf.keras
LOSSES.update({
'nse': tf_losses.tf_nse,
'kge': tf_losses.tf_kge,
})
LOSSES.update(get_attributes(aus=tf.keras, what='losses', case_sensitive=True))
else:
NBeats, TemporalFusionTransformer, attns, keras = None, None, None, None
if tf is not None:
LAYERS.update({"TemporalFusionTransformer": TemporalFusionTransformer})
LAYERS.update({"TFT": TemporalFusionTransformer})
LAYERS.update(get_attributes(aus=tf.keras, what='layers', case_sensitive=True))
from .models._tensorflow.private_layers import PrivateLayers
# add private layers to dictionary
LAYERS.update(get_attributes(aus=PrivateLayers, what='layers', case_sensitive=True))
if NBeats is not None:
LAYERS.update({"NBeats": NBeats})
if attns is not None:
LAYERS.update(get_attributes(aus=attns, what='attn_layers', case_sensitive=True))
ACTIVATION_LAYERS = {
# https://ai.stanford.edu/%7Eamaas/papers/relu_hybrid_icml2013_final.pdf
'LeakyReLU': lambda name='leaky_relu': keras.layers.LeakyReLU(name=name),
# https://arxiv.org/pdf/1502.01852v1.pdf
'PReLU': lambda name='prelu': keras.layers.PReLU(name=name),
'relu': lambda name='relu': keras.layers.Activation('relu', name=name),
'tanh': lambda name='tanh': keras.layers.Activation('tanh', name=name),
'ELU': lambda name='elu': keras.layers.ELU(name=name),
'ThresholdedReLU': lambda name='ThresholdRelu': keras.layers.ThresholdedReLU(name=name),
'selu': lambda name='selu': keras.layers.Activation("selu", name=name),
'sigmoid': lambda name='sigmoid': keras.layers.Activation('sigmoid', name=name),
'hardsigmoid': lambda name='HardSigmoid': keras.layers.Activation('hard_sigmoid', name=name),
'crelu': lambda name='crelu': keras.layers.Activation(tf.nn.crelu, name=name),
'relu6': lambda name='relu6': keras.layers.Activation(tf.nn.relu6, name=name),
'softmax': lambda name='softmax': keras.layers.Activation(tf.nn.softmax, name=name),
'softplus': lambda name='sofplus': keras.layers.Activation(tf.nn.softplus, name=name),
'softsign': lambda name='softsign': keras.layers.Activation(tf.nn.softsign, name=name),
"swish": lambda name='swish': keras.layers.Activation(tf.nn.swish, name=name),
}
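# Hedged usage sketch: each entry above is a factory taking an optional layer name, e.g.
#   >>> ACTIVATION_LAYERS['relu'](name='relu_0')   # -> keras.layers.Activation('relu', name='relu_0')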
ACTIVATION_FNS = {
'relu': 'relu', # keras.layers.Activation('relu', name=name),
'tanh': 'tanh',
'elu': 'elu',
"hardsigmoid": 'hard_sigmoid',
"linear": 'linear',
}
if tf is not None:
ACTIVATION_FNS.update({
'leakyrelu': tf.nn.leaky_relu,
'crelu': tf.nn.crelu,
'selu': tf.nn.selu, # tf.keras.activations.selu, # https://arxiv.org/pdf/1706.02515.pdf
'relu6': tf.nn.relu6, # http://www.cs.utoronto.ca/%7Ekriz/conv-cifar10-aug2010.pdf
'softmax': tf.nn.softmax,
"softsign": tf.nn.softsign,
"softplus": tf.nn.softplus,
'sigmoid': tf.nn.sigmoid,
"swish": tf.nn.swish, # https://arxiv.org/pdf/1710.05941.pdf
})
OPTIMIZERS = {}
if tf is not None:
OPTIMIZERS.update(get_attributes(aus=tf.keras, what='optimizers', case_sensitive=True))

# ===== end of ai4water/tf_attributes.py =====
__all__ = ["HARHNModel", "IMVModel"]
from typing import Any
from easy_mpl import imshow
from .main import Model
from .utils.utils import dateandtime_now, plot_activations_along_inputs
from .backend import torch, np, plt, os, mpl
if torch is not None:
from .models._torch import IMVTensorLSTM
from .models._torch import HARHN
else:
HARHN, IMVTensorLSTM = None, None
class HARHNModel(Model):
def __init__(self, use_cuda=True, teacher_forcing=True, **kwargs):
dev = torch.device("cpu")
if use_cuda and torch.cuda.is_available():
dev = torch.device("cuda")
else:
# so that use_cuda is not passed on incorrectly to the learner
use_cuda = False
self.dev = dev
super(HARHNModel, self).__init__(teacher_forcing=teacher_forcing, **kwargs)
# should be set after initiating upper classes so that torch_learner attribute is set
self.torch_learner.use_cuda = use_cuda
def initialize_layers(self, layers_config: dict, inputs=None):
self.pt_model = HARHN(layers_config['n_conv_lyrs'],
self.lookback,
self.num_ins,
self.num_outs,
n_units_enc=layers_config['enc_units'],
n_units_dec=layers_config['dec_units'],
use_predicted_output=self.teacher_forcing, # self.config['use_predicted_output']
).to(self.dev)
return
def forward(self, *inputs: Any, **kwargs: Any):
y_pred, _ = self.pt_model(inputs[0], inputs[1][:, -1], **kwargs)
return y_pred
class IMVModel(HARHNModel):
def __init__(self, *args, teacher_forcing=False, **kwargs):
super(IMVModel, self).__init__(*args, teacher_forcing=teacher_forcing, **kwargs)
def initialize_layers(self, layers_config:dict, inputs=None):
self.pt_model = IMVTensorLSTM(self.num_ins, self.num_outs,
layers_config['hidden_units'],
device=self.dev).to(self.dev)
self.alphas, self.betas = [], []
return
def forward(self, *inputs: Any, **kwargs: Any):
y_pred, alphas, betas = self.pt_model(*inputs, **kwargs)
self.alphas.append(alphas)
self.betas.append(betas)
return y_pred
def interpret(self,
data='training',
x=None,
annotate=True,
vmin=None,
vmax=None,
**bar_kws,
):
mpl.rcParams.update(mpl.rcParamsDefault)
self.alphas, self.betas = [], []
true, predicted = self.predict(data=data, process_results=False, return_true=True)
name = f'data_on_{dateandtime_now()}' if x is not None else data
betas = [array.detach().cpu().numpy() for array in self.betas]
betas = np.concatenate(betas) # (examples, ins, 1)
betas = betas.mean(axis=0) # (ins, 1)
betas = betas[..., 0] # (ins, )
alphas = [array.detach().cpu().numpy() for array in self.alphas]
alphas = np.concatenate(alphas) # (examples, lookback, ins, 1)
x, _ = getattr(self, f'{data}_data')()
if len(x) == 0 or (isinstance(x, list) and len(x[0]) == 0):
raise ValueError(f"no {data} data found.")
path = os.path.join(self.path, "interpret")
if not os.path.exists(path):
os.makedirs(path)
plot_activations_along_inputs(data=x[:, -1, :], # todo, is -1 correct?
activations=alphas.reshape(-1, self.lookback, self.num_ins),
observations=true,
predictions=predicted,
in_cols=self.input_features,
out_cols=self.output_features,
lookback=self.lookback,
name=name,
path=path,
vmin=vmin,
vmax=vmax
)
alphas = alphas.mean(axis=0) # (lookback, ins, 1)
alphas = alphas[..., 0] # (lookback, ins)
alphas = alphas.transpose(1, 0) # (ins, lookback)
all_cols = self.input_features
plt.close('all')
fig, ax = plt.subplots()
fig.set_figwidth(16)
fig.set_figheight(16)
xticklabels=["t-"+str(i) for i in np.arange(self.lookback, 0, -1)]
imshow(alphas,
ax=ax,
xticklabels=xticklabels,
yticklabels=list(all_cols),
ax_kws=dict(title="Importance of features and timesteps"),
annotate=annotate,
show=False)
plt.savefig(os.path.join(path, f'acts_{name}'), dpi=400, bbox_inches='tight')
plt.close('all')
plt.bar(range(self.num_ins), betas, **bar_kws)
plt.xticks(ticks=range(len(all_cols)), labels=list(all_cols), rotation=90, fontsize=12)
plt.savefig(os.path.join(path, f'feature_importance_{name}'), dpi=400, bbox_inches='tight')
return

# ===== end of ai4water/pytorch_models.py =====
__all__ = ["np", "os", "plt", "mpl", "pd", "random", "scipy", "stats",
"easy_mpl", "SeqMetrics",
"sklearn",
"xgboost", "catboost", "lightgbm",
"skopt", "hyperopt", "hp", "optuna",
"xr", "fiona", "netCDF4",
"sns", "imageio", "shapefile", "tf", "torch", "keras",
"requests", "plotly", "h5py", "lime",
"xgboost_models", "catboost_models", "lightgbm_models", "sklearn_models",
"get_attributes",
"wandb", "WandbCallback",
]
from types import FunctionType
import os
import random
import easy_mpl
import scipy
from scipy import stats
import SeqMetrics
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
import sklearn
except (ModuleNotFoundError, ImportError):
sklearn = None
def get_attributes(
aus,
what: str,
retain: str = None,
case_sensitive: bool = False
) -> dict:
"""gets all callable attributes of aus from what and saves them in dictionary
with their names as keys. If case_sensitive is True, then the all keys are
capitalized so that calling them becomes case insensitive. It is possible
that some of the attributes of tf.keras.layers are callable but still not
a valid `layer`, sor some attributes of tf.keras.losses are callable but
still not valid losses, in that case the error will be generated from tensorflow.
We are not catching those error right now.
Parameters
----------
aus :
parent module
what : str
child module/package
retain : str, optional (default=None)
if duplicates of 'what' exist then whether to prefer class or function.
For example, fastica and FastICA exist in sklearn.decomposition then if retain
is 'function' then fastica will be kept, if retain is 'class' then FastICA is
kept. If retain is None, then what comes later will overwrite the previously
kept object.
case_sensitive : bool, optional (default=False)
whether to consider what as case-sensitive or not. In such
a case, fastica and FastICA will both be saved as separate objects.
Example
-------
>>> get_attributes(tf.keras, 'layers') # will get all layers from tf.keras.layers
"""
if retain:
assert retain in ("class", "function")
all_attrs = {}
for obj in dir(getattr(aus, what)):
attr = getattr(getattr(aus, what), obj)
if callable(attr) and not obj.startswith('_'):
if not case_sensitive:
obj = obj.upper()
if obj in all_attrs and retain == 'function':
if isinstance(attr, FunctionType):
all_attrs[obj] = attr
elif obj in all_attrs and retain == 'class':
if not isinstance(attr, FunctionType):
all_attrs[obj] = attr
else:
all_attrs[obj] = attr
return all_attrs
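# Hedged sketch of the `retain` argument, mirroring the fastica/FastICA case from the docstring:
#   >>> attrs = get_attributes(sklearn, 'decomposition', retain='class')
#   >>> attrs['FASTICA']    # the FastICA class is kept rather than the fastica function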
def get_sklearn_models():
if sklearn is not None:
# the following line must be executed in order for get_attributes to work, don't know why
from sklearn.ensemble import RandomForestRegressor
sk_maj_ver = int(sklearn.__version__.split('.')[0])
sk_min_ver = int(sklearn.__version__.split('.')[1])
if sk_maj_ver == 0 and sk_min_ver < 24:
from sklearn.neural_network import multilayer_perceptron
else:
from sklearn.neural_network import MLPClassifier
from sklearn.multioutput import MultiOutputRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.kernel_ridge import KernelRidge
from sklearn.isotonic import isotonic_regression
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.compose import TransformedTargetRegressor
skl_models = get_attributes(sklearn, "ensemble", case_sensitive=True)
skl_models.update(get_attributes(sklearn, "dummy", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "gaussian_process", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "compose", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "linear_model", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "multioutput", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "neighbors", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "neural_network", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "svm", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "tree", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "naive_bayes", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "kernel_ridge", case_sensitive=True))
skl_models.update(get_attributes(sklearn, "isotonic", case_sensitive=True))
from sklearn.calibration import CalibratedClassifierCV
skl_models.update(get_attributes(sklearn, "calibration", case_sensitive=True))
from sklearn.semi_supervised import LabelPropagation
skl_models.update(get_attributes(sklearn, "semi_supervised", case_sensitive=True))
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
skl_models.update(get_attributes(sklearn, "discriminant_analysis", case_sensitive=True))
skl_models.update({"HistGradientBoostingRegressor": HistGradientBoostingRegressor,
"HistGradientBoostingClassifier": HistGradientBoostingClassifier})
else:
skl_models = {}
return skl_models
maj_version = 0
min_version = 0
try:
from tensorflow import keras
import tensorflow as tf
maj_version = int(tf.__version__[0])
min_version = int(tf.__version__[2])
except ModuleNotFoundError:
keras = None
tf = None
try:
import skopt
except ModuleNotFoundError:
skopt = None
try:
import tcn
except ModuleNotFoundError:
tcn = None
try:
import torch
except (ModuleNotFoundError, ImportError):
torch = None
try:
import seaborn as sns
except ModuleNotFoundError:
sns = None
try:
import imageio
except (ModuleNotFoundError, ImportError):
imageio = None
try:
import shapefile
except (ModuleNotFoundError, ImportError):
shapefile = None
catboost_models = {}
try:
import hyperopt
except (ModuleNotFoundError, ImportError):
hyperopt = None
if hyperopt is None:
hp = None
else:
from hyperopt import hp
try:
import xarray as xr
except (ModuleNotFoundError, ImportError):
xr = None
try:
import fiona
except (ModuleNotFoundError, ImportError):
fiona = None
try:
import netCDF4
except (ModuleNotFoundError, ImportError):
netCDF4 = None
try:
import requests
except (ModuleNotFoundError, ImportError):
requests = None
try:
import optuna
except (ModuleNotFoundError, ImportError):
optuna = None
try:
import plotly
except ImportError:
plotly = None
try:
import h5py
except ModuleNotFoundError:
h5py = None
try:
import lime
except ModuleNotFoundError:
lime = None
try:
import catboost
from catboost import CatBoostClassifier, CatBoostRegressor
catboost_models.update({"CatBoostClassifier": CatBoostClassifier})
catboost_models.update({"CatBoostRegressor": CatBoostRegressor})
except ModuleNotFoundError:
catboost = None
xgboost_models = {}
try:
import xgboost
from xgboost import XGBRegressor, XGBClassifier, XGBRFRegressor, XGBRFClassifier
xgboost_models.update({
"XGBRegressor": XGBRegressor,
"XGBClassifier": XGBClassifier,
"XGBRFRegressor": XGBRFRegressor,
"XGBRFClassifier": XGBRFClassifier,
})
except ModuleNotFoundError:
xgboost = None
lightgbm_models = {}
try:
import lightgbm
from lightgbm.sklearn import LGBMClassifier, LGBMRegressor
lightgbm_models.update({"LGBMClassifier": LGBMClassifier,
"LGBMRegressor": LGBMRegressor})
except ModuleNotFoundError:
lightgbm = None
sklearn_models = get_sklearn_models()
if sklearn is not None:
from sklearn.experimental import enable_iterative_imputer # noqa
imputations = get_attributes(sklearn, 'impute', case_sensitive=True)
else:
imputations = {}
keras = keras
torch = torch
tf = tf
try:
from wandb.keras import WandbCallback
import wandb
except ModuleNotFoundError:
WandbCallback = None
wandb = None
if tf is not None:
BACKEND = 'tensorflow'
elif torch is not None:
BACKEND = 'pytorch'
else:
BACKEND = None | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/backend.py | backend.py |
from typing import Callable, Union
from ai4water.backend import np, plt, pd
from mpl_toolkits.axes_grid1 import make_axes_locatable
from easy_mpl import bar_chart
from easy_mpl.utils import make_cols_from_cmap
def feature_interaction(
predict_func:Callable,
X,
features,
feature_names,
n_classes:int = 0,
num_grid_points=None,
grid_types="percentile",
percentile_ranges=None,
grid_ranges=None,
cust_grid_points=None,
show_percentile=False,
show_outliers=False,
end_point=True,
which_classes=None,
predict_kwds={},
ncols=2,
cmap="YlGn",
border=False,
figsize=None,
annotate=False,
annotate_counts=True,
annotate_colors=("black", "white"),
annotate_color_threshold=None,
annotate_fmt=None,
annotate_fontsize=7
):
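"""Shows the interaction of two features with respect to the model prediction,
as a heatmap of the median prediction within each 2-D bucket of feature values.
``predict_func`` is called on ``X`` to obtain the predictions. Returns the
matplotlib Axes and the summary DataFrame used for plotting."""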
assert isinstance(X, pd.DataFrame)
num_grid_points = _expand_default(num_grid_points, 10)
#assert grid_types in ['percentile', 'equal']
grid_types = _expand_default(grid_types, 'percentile')
percentile_ranges = _expand_default(percentile_ranges, None)
_check_percentile_range(percentile_range=percentile_ranges[0])
_check_percentile_range(percentile_range=percentile_ranges[1])
grid_ranges = _expand_default(grid_ranges, None)
cust_grid_points = _expand_default(cust_grid_points, None)
if not show_outliers:
show_outliers = [False, False]
else:
show_outliers = [True, True]
for i in range(2):
if (percentile_ranges[i] is None) and (grid_ranges[i] is None) and (cust_grid_points[i] is None):
show_outliers[i] = False
feature_types = [_check_feature(feature=features[0], df=X), _check_feature(feature=features[1], df=X)]
# prediction
prediction = predict_func(X, **predict_kwds)
info_df = X[_make_list(features[0]) + _make_list(features[1])].copy()
actual_prediction_columns = ['actual_prediction']
if n_classes == 0:
info_df['actual_prediction'] = prediction
elif n_classes == 2:
info_df['actual_prediction'] = prediction[:, 1]
else:
plot_classes = range(n_classes)
if which_classes is not None:
plot_classes = sorted(which_classes)
actual_prediction_columns = []
for class_idx in plot_classes:
info_df['actual_prediction_%d' % class_idx] = prediction[:, class_idx]
actual_prediction_columns.append('actual_prediction_%d' % class_idx)
agg_dict = {}
actual_prediction_columns_qs = []
for idx in range(len(actual_prediction_columns)):
agg_dict[actual_prediction_columns[idx]] = [q1, q2, q3]
actual_prediction_columns_qs += [actual_prediction_columns[idx] + '_%s' % q for q in ['q1', 'q2', 'q3']]
agg_dict['fake_count'] = 'count'
data_x, actual_plot_data, prepared_results = _prepare_info_plot_interact_data(
data_input=info_df, features=features, feature_types=feature_types, num_grid_points=num_grid_points,
grid_types=grid_types, percentile_ranges=percentile_ranges, grid_ranges=grid_ranges,
cust_grid_points=cust_grid_points, show_percentile=show_percentile,
show_outliers=show_outliers, endpoint=end_point, agg_dict=agg_dict)
actual_plot_data.columns = ['_'.join(col) if col[1] != '' else col[0] for col in actual_plot_data.columns]
actual_plot_data = actual_plot_data.rename(columns={'fake_count_count': 'fake_count'})
# prepare summary data frame
summary_df, info_cols = _prepare_info_plot_interact_summary(
data_x=data_x, plot_data=actual_plot_data, prepared_results=prepared_results, feature_types=feature_types)
summary_df = summary_df[info_cols + ['count'] + actual_prediction_columns_qs]
vals = []
for i in np.unique(summary_df['x1']):
row = summary_df['actual_prediction_q2'].loc[summary_df['x1'] == i]
for j in np.unique(summary_df['x2'])[::-1]:
vals.append(row.iloc[j])
counts = []
for i in np.unique(summary_df['x1']):
row = summary_df['count'].loc[summary_df['x1'] == i]
for j in np.unique(summary_df['x2'])[::-1]:
counts.append(row.iloc[j])
xticklabels = summary_df.loc[summary_df['x1'] == 0]['display_column_2'].values[::-1]
#if yticklabels is None:
yticklabels = summary_df.loc[summary_df['x2'] == 0]['display_column_1'].values
x = np.array(vals).reshape(len(yticklabels), len(xticklabels))
df = pd.DataFrame(x, columns=xticklabels, index=yticklabels)
counts = np.array(counts).reshape(len(yticklabels), len(xticklabels))
counts = pd.DataFrame(counts, columns=xticklabels, index=yticklabels, dtype=int)
fig, axes = plt.subplots(figsize=figsize)
im, cbar = heatmap(
df,
row_labels=df.index,
col_labels=df.columns,
ax=axes,
cmap=cmap,
cbarlabel="Median Prediction",
border=border
)
axes.set_ylabel(features[0])
axes.set_xlabel(features[1])
if annotate:
if annotate_counts:
annotate_imshow(
im, counts.values,
fmt=annotate_fmt or "{:n}",
fontsize=annotate_fontsize,
textcolors=annotate_colors,
threshold=annotate_color_threshold)
else:
annotate_imshow(
im,
fmt=annotate_fmt or "{:.2f}",
fontsize=annotate_fontsize,
textcolors=annotate_colors,
threshold=annotate_color_threshold,
)
return axes, summary_df
def prediction_distribution_plot(
mode:str,
inputs,
prediction,
feature,
feature_name,
n_classes: int = None,
num_grid_points=10,
grid_type='percentile',
percentile_range=None,
grid_range=None,
cust_grid_points=None,
show_percentile=False,
show_outliers=False,
end_point=True,
classes=None,
ncols=2,
figsize=None,
show=True,
):
"""
data = busan_beach()
model = Model(model="XGBRegressor")
model.fit(data=data)
y = model.predict_on_training_data(data=data)
x, _ = model.training_data(data=data)
prediction_distribution_plot(
model.mode,
inputs=pd.DataFrame(x,
columns=model.input_features),
prediction=y,
feature='tide_cm',
feature_name='tide_cm',
show_percentile=True,
n_classes=model.num_classes
)
plt.show()
"""
if mode != "regression":
assert n_classes is not None
is_binary = False
if n_classes == 2:
is_binary = True
# check inputs
feature_type, show_outliers = _check_info_plot_params(
df=inputs, feature=feature, grid_type=grid_type, percentile_range=percentile_range,
grid_range=grid_range,
cust_grid_points=cust_grid_points, show_outliers=show_outliers)
# make predictions
# info_df only contains feature value and actual predictions
info_df = inputs[_make_list(feature)].copy()  # copy to avoid SettingWithCopyWarning when adding columns
actual_prediction_columns = ['actual_prediction']
if mode == "regression":
info_df['actual_prediction'] = prediction
elif is_binary:
info_df['actual_prediction'] = prediction[:, 1]
else:
plot_classes = range(n_classes)
if classes is not None:
_check_classes(classes_list=classes, n_classes=n_classes)
plot_classes = sorted(classes)
actual_prediction_columns = []
for class_idx in plot_classes:
info_df['actual_prediction_%d' % class_idx] = prediction[:, class_idx]
actual_prediction_columns.append('actual_prediction_%d' % class_idx)
info_df_x, summary_df, info_cols = _prepare_info_plot_data(
feature=feature, feature_type=feature_type, data=info_df, num_grid_points=num_grid_points,
grid_type=grid_type, percentile_range=percentile_range, grid_range=grid_range,
cust_grid_points=cust_grid_points, show_percentile=show_percentile,
show_outliers=show_outliers, endpoint=end_point)
# prepare data for box lines
# each box line contains 'x' and actual prediction q1, q2, q3
box_lines = []
actual_prediction_columns_qs = []
for idx in range(len(actual_prediction_columns)):
box_line = info_df_x.groupby('x', as_index=False).agg(
{actual_prediction_columns[idx]: [q1, q2, q3]}).sort_values('x', ascending=True)
box_line.columns = ['_'.join(col) if col[1] != '' else col[0] for col in box_line.columns]
box_lines.append(box_line)
actual_prediction_columns_qs += [actual_prediction_columns[idx] + '_%s' % q for q in ['q1', 'q2', 'q3']]
summary_df = summary_df.merge(box_line, on='x', how='outer').fillna(0)
summary_df = summary_df[info_cols + ['count'] + actual_prediction_columns_qs]
# Draw the bar chart
fig, ax = plt.subplots(figsize=figsize)
color = make_cols_from_cmap("PuBu", len(summary_df), 0.2)
ax = bar_chart(
summary_df['actual_prediction_q2'],
summary_df['display_column'],
ax_kws={"xlabel":"Mean Prediction", "xlabel_kws":{"fontsize": 14},
"ylabel": f"{feature_name}", 'ylabel_kws': {'fontsize': 14}},
show=False,
color=color,
bar_labels=summary_df['count'],
bar_label_kws={"color": "black", 'label_type': 'edge', 'fontsize': 14},
ax=ax,
)
if show:
plt.tight_layout()
plt.show()
return ax, summary_df
def heatmap(
data,
row_labels,
col_labels,
ax=None,
cbar_kw={},
cbarlabel="",
xlabel_on_top=True,
border:bool = False,
**kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Parameters
----------
data
A 2D numpy array of shape (M, N).
row_labels
A list or array of length M with the labels for the rows.
col_labels
A list or array of length N with the labels for the columns.
ax
A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
not provided, use current axes or create a new one. Optional.
cbar_kw
A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
cbarlabel
The label for the colorbar. Optional.
border : bool
whether to draw the spines i.e. a border around the heatmap. Optional.
xlabel_on_top : bool
whether to show the x-axis tick labels on top of the figure. Optional.
**kwargs
All other arguments are forwarded to `imshow`.
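Examples
--------
A minimal sketch with made-up data (the labels are arbitrary):
>>> import numpy as np
>>> data = np.random.random((3, 4))
>>> im, cbar = heatmap(data, row_labels=list("abc"), col_labels=list("wxyz"))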
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
# cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
# cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
fig: plt.Figure = plt.gcf()
cbar = fig.colorbar(im, orientation="vertical", pad=0.2, cax=cax)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# Show all ticks and label them with the respective list entries.
ax.set_xticks(np.arange(data.shape[1]))
ax.set_xticklabels(col_labels)
ax.set_yticks(np.arange(data.shape[0]))
ax.set_yticklabels(row_labels)
if xlabel_on_top:
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
#else:
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
if not border:
# Turn spines off and create white grid.
# in older versions ax.spines is dict and in newer versions it is list
if isinstance(ax.spines, dict):
for v in ax.spines.values():
v.set_visible(False)
else:
ax.spines[:].set_visible(False)
if xlabel_on_top:
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
else:
ax.set_xticks(np.arange(data.shape[1] - 1) + .5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
def annotate_imshow(
im,
data:np.ndarray=None,
textcolors:Union[tuple, np.ndarray]=("black", "white"),
threshold=None,
fmt = "{:.2f}",
**text_kws
):
"""annotates imshow
https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html
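Examples
--------
A minimal sketch with made-up data:
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> array = np.arange(6).reshape(2, 3)
>>> im = plt.imshow(array)
>>> annotate_imshow(im, array)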
"""
if data is None:
data = im.get_array()
use_threshold = True
if isinstance(textcolors, np.ndarray) and textcolors.shape == data.shape:
assert threshold is None, f"if textcolors is given as array then threshold should be None"
use_threshold = False
else:
if threshold is not None:
threshold = im.norm(threshold)
else:
threshold = im.norm(data.max()) / 2
for i in range(data.shape[0]):
for j in range(data.shape[1]):
s = fmt.format(float(data[i, j]))
if use_threshold:
_ = im.axes.text(j, i, s,
color=textcolors[int(im.norm(data[i, j]) > threshold)],
**text_kws)
else:
_ = im.axes.text(j, i, s,
color=textcolors[i, j],
**text_kws)
return
def _make_list(x):
if not isinstance(x, list):
return [x]
return x
def q1(x):
return x.quantile(0.25)
def q2(x):
return x.quantile(0.5)
def q3(x):
return x.quantile(0.75)
def _check_classes(classes_list, n_classes):
"""Makre sure classes list is valid
Notes
-----
class index starts from 0
"""
if len(classes_list) > 0 and n_classes > 2:
if np.min(classes_list) < 0:
raise ValueError('class index should be >= 0.')
if np.max(classes_list) > n_classes - 1:
raise ValueError('class index should be < n_classes.')
return
def _expand_default(x, default):
"""Create a list of default values"""
if x is None:
return [default] * 2
return x
def _check_percentile_range(percentile_range):
"""Make sure percentile range is valid"""
if percentile_range is not None:
if type(percentile_range) != tuple:
raise ValueError('percentile_range: should be a tuple')
if len(percentile_range) != 2:
raise ValueError('percentile_range: should contain 2 elements')
if np.max(percentile_range) > 100 or np.min(percentile_range) < 0:
raise ValueError('percentile_range: should be between 0 and 100')
return
def _check_feature(feature, df):
"""Make sure feature exists and infer feature type
Feature types
-------------
1. binary
2. onehot
3. numeric
"""
if type(feature) == list:
if len(feature) < 2:
raise ValueError('one-hot encoding feature should contain more than 1 element')
if not set(feature) < set(df.columns.values):
raise ValueError('feature does not exist: %s' % str(feature))
feature_type = 'onehot'
else:
if feature not in df.columns.values:
raise ValueError('feature does not exist: %s' % feature)
if sorted(list(np.unique(df[feature]))) == [0, 1]:
feature_type = 'binary'
else:
feature_type = 'numeric'
return feature_type
def _prepare_info_plot_interact_data(data_input, features, feature_types, num_grid_points, grid_types,
percentile_ranges, grid_ranges, cust_grid_points, show_percentile,
show_outliers, endpoint, agg_dict):
"""Prepare data for information interact plots"""
prepared_results = []
for i in range(2):
prepared_result = _prepare_data_x(
feature=features[i], feature_type=feature_types[i], data=data_input,
num_grid_points=num_grid_points[i], grid_type=grid_types[i], percentile_range=percentile_ranges[i],
grid_range=grid_ranges[i], cust_grid_points=cust_grid_points[i],
show_percentile=show_percentile, show_outliers=show_outliers[i], endpoint=endpoint)
prepared_results.append(prepared_result)
if i == 0:
data_input = prepared_result['data'].rename(columns={'x': 'x1'})
data_x = prepared_results[1]['data'].rename(columns={'x': 'x2'})
data_x['fake_count'] = 1
plot_data = data_x.groupby(['x1', 'x2'], as_index=False).agg(agg_dict)
return data_x, plot_data, prepared_results
def _prepare_data_x(feature, feature_type, data, num_grid_points, grid_type, percentile_range,
grid_range, cust_grid_points, show_percentile, show_outliers, endpoint):
"""Map value to bucket based on feature grids"""
display_columns = []
bound_ups = []
bound_lows = []
percentile_columns = []
percentile_bound_lows = []
percentile_bound_ups = []
data_x = data.copy()
if feature_type == 'binary':
feature_grids = np.array([0, 1])
display_columns = ['%s_0' % feature, '%s_1' % feature]
data_x['x'] = data_x[feature]
if feature_type == 'numeric':
percentile_info = None
if cust_grid_points is None:
feature_grids, percentile_info = _get_grids(
feature_values=data_x[feature].values, num_grid_points=num_grid_points, grid_type=grid_type,
percentile_range=percentile_range, grid_range=grid_range)
else:
feature_grids = np.array(sorted(cust_grid_points))
if not show_outliers:
data_x = data_x[(data_x[feature] >= feature_grids[0])
& (data_x[feature] <= feature_grids[-1])].reset_index(drop=True)
# map feature value into value buckets
data_x['x'] = data_x[feature].apply(lambda x: _find_bucket(x=x, feature_grids=feature_grids, endpoint=endpoint))
uni_xs = sorted(data_x['x'].unique())
# create bucket names
display_columns, bound_lows, bound_ups = _make_bucket_column_names(feature_grids=feature_grids, endpoint=endpoint)
display_columns = np.array(display_columns)[range(uni_xs[0], uni_xs[-1]+1)]
bound_lows = np.array(bound_lows)[range(uni_xs[0], uni_xs[-1] + 1)]
bound_ups = np.array(bound_ups)[range(uni_xs[0], uni_xs[-1] + 1)]
# create percentile bucket names
if show_percentile and grid_type == 'percentile':
percentile_columns, percentile_bound_lows, percentile_bound_ups = \
_make_bucket_column_names_percentile(percentile_info=percentile_info, endpoint=endpoint)
percentile_columns = np.array(percentile_columns)[range(uni_xs[0], uni_xs[-1]+1)]
percentile_bound_lows = np.array(percentile_bound_lows)[range(uni_xs[0], uni_xs[-1] + 1)]
percentile_bound_ups = np.array(percentile_bound_ups)[range(uni_xs[0], uni_xs[-1] + 1)]
# adjust results
data_x['x'] = data_x['x'] - data_x['x'].min()
if feature_type == 'onehot':
feature_grids = display_columns = np.array(feature)
data_x['x'] = data_x[feature].apply(lambda x: _find_onehot_actual(x=x), axis=1)
data_x = data_x[~data_x['x'].isnull()].reset_index(drop=True)
data_x['x'] = data_x['x'].map(int)
results = {
'data': data_x,
'value_display': (list(display_columns), list(bound_lows), list(bound_ups)),
'percentile_display': (list(percentile_columns), list(percentile_bound_lows), list(percentile_bound_ups))
}
return results
def _get_grids(feature_values, num_grid_points, grid_type, percentile_range, grid_range):
"""Calculate grid points for numeric feature
Returns
-------
feature_grids: 1d-array
calculated grid points
percentile_info: 1d-array or []
percentile information for feature_grids
exists when grid_type='percentile'
"""
if grid_type == 'percentile':
# grid points are calculated based on percentile in unique level
# thus the final number of grid points might be smaller than num_grid_points
start, end = 0, 100
if percentile_range is not None:
start, end = np.min(percentile_range), np.max(percentile_range)
percentile_grids = np.linspace(start=start, stop=end, num=num_grid_points)
value_grids = np.percentile(feature_values, percentile_grids)
grids_df = pd.DataFrame()
grids_df['percentile_grids'] = [round(v, 2) for v in percentile_grids]
grids_df['value_grids'] = value_grids
grids_df = grids_df.groupby(['value_grids'], as_index=False).agg(
{'percentile_grids': lambda v: str(tuple(v)).replace(',)', ')')}).sort_values('value_grids', ascending=True)
feature_grids, percentile_info = grids_df['value_grids'].values, grids_df['percentile_grids'].values
else:
if grid_range is not None:
value_grids = np.linspace(np.min(grid_range), np.max(grid_range), num_grid_points)
else:
value_grids = np.linspace(np.min(feature_values), np.max(feature_values), num_grid_points)
feature_grids, percentile_info = value_grids, []
return feature_grids, percentile_info
def _find_onehot_actual(x):
"""Map one-hot value to one-hot name"""
try:
value = list(x).index(1)
except ValueError:  # 1 is not present in x
value = np.nan
return value
def _find_bucket(x, feature_grids, endpoint):
"""Find bucket that x falls in"""
# map value into value bucket
if x < feature_grids[0]:
bucket = 0
else:
if endpoint:
if x > feature_grids[-1]:
bucket = len(feature_grids)
else:
bucket = len(feature_grids) - 1
for i in range(len(feature_grids) - 2):
if feature_grids[i] <= x < feature_grids[i + 1]:
bucket = i + 1
else:
if x >= feature_grids[-1]:
bucket = len(feature_grids)
else:
bucket = len(feature_grids) - 1
for i in range(len(feature_grids) - 2):
if feature_grids[i] <= x < feature_grids[i + 1]:
bucket = i + 1
return bucket
def _make_bucket_column_names_percentile(percentile_info, endpoint):
"""Create bucket names based on percentile info"""
# create percentile bucket names
percentile_column_names = []
percentile_info_numeric = []
for p_idx, p in enumerate(percentile_info):
p_array = np.array(p.replace('(', '').replace(')', '').split(', ')).astype(np.float64)
if p_idx == 0 or p_idx == len(percentile_info) - 1:
p_numeric = np.min(p_array)
else:
p_numeric = np.max(p_array)
percentile_info_numeric.append(p_numeric)
percentile_bound_lows = [0]
percentile_bound_ups = [percentile_info_numeric[0]]
for i in range(len(percentile_info) - 1):
# for each grid point, percentile information is in tuple format
# (percentile1, percentile2, ...)
# some grid points would belong to multiple percentiles
low, high = percentile_info_numeric[i], percentile_info_numeric[i + 1]
low_str, high_str = _get_string(x=low), _get_string(x=high)
percentile_column_name = '[%s, %s)' % (low_str, high_str)
percentile_bound_lows.append(low)
percentile_bound_ups.append(high)
if i == len(percentile_info) - 2:
if endpoint:
percentile_column_name = '[%s, %s]' % (low_str, high_str)
else:
percentile_column_name = '[%s, %s)' % (low_str, high_str)
percentile_column_names.append(percentile_column_name)
low, high = percentile_info_numeric[0], percentile_info_numeric[-1]
low_str, high_str = _get_string(x=low), _get_string(x=high)
if endpoint:
percentile_column_names = ['< %s' % low_str] + percentile_column_names + ['> %s' % high_str]
else:
percentile_column_names = ['< %s' % low_str] + percentile_column_names + ['>= %s' % high_str]
percentile_bound_lows.append(high)
percentile_bound_ups.append(100)
return percentile_column_names, percentile_bound_lows, percentile_bound_ups
def _get_string(x):
if int(x) == x:
x_str = str(int(x))
elif round(x, 1) == x:
x_str = str(round(x, 1))
else:
x_str = str(round(x, 2))
return x_str
def _make_bucket_column_names(feature_grids, endpoint):
"""Create bucket names based on feature grids"""
# create bucket names
column_names = []
bound_lows = [np.nan]
bound_ups = [feature_grids[0]]
feature_grids_str = []
for g in feature_grids:
feature_grids_str.append(_get_string(x=g))
# number of buckets: len(feature_grids_str) - 1
for i in range(len(feature_grids_str) - 1):
column_name = '[%s, %s)' % (feature_grids_str[i], feature_grids_str[i + 1])
bound_lows.append(feature_grids[i])
bound_ups.append(feature_grids[i + 1])
if (i == len(feature_grids_str) - 2) and endpoint:
column_name = '[%s, %s]' % (feature_grids_str[i], feature_grids_str[i + 1])
column_names.append(column_name)
if endpoint:
column_names = ['< %s' % feature_grids_str[0]] + column_names + ['> %s' % feature_grids_str[-1]]
else:
column_names = ['< %s' % feature_grids_str[0]] + column_names + ['>= %s' % feature_grids_str[-1]]
bound_lows.append(feature_grids[-1])
bound_ups.append(np.nan)
return column_names, bound_lows, bound_ups
def _check_info_plot_params(df, feature, grid_type, percentile_range, grid_range,
cust_grid_points, show_outliers):
"""Check information plot parameters"""
assert isinstance(df, pd.DataFrame)
feature_type = _check_feature(feature=feature, df=df)
assert grid_type in ['percentile', 'equal']
_check_percentile_range(percentile_range=percentile_range)
# show_outliers should be only turned on when necessary
if (percentile_range is None) and (grid_range is None) and (cust_grid_points is None):
show_outliers = False
return feature_type, show_outliers
def _prepare_info_plot_interact_summary(data_x, plot_data, prepared_results, feature_types):
"""Prepare summary data frame for interact plots"""
x1_values = []
x2_values = []
for x1_value in range(data_x['x1'].min(), data_x['x1'].max() + 1):
for x2_value in range(data_x['x2'].min(), data_x['x2'].max() + 1):
x1_values.append(x1_value)
x2_values.append(x2_value)
summary_df = pd.DataFrame()
summary_df['x1'] = x1_values
summary_df['x2'] = x2_values
summary_df = summary_df.merge(plot_data.rename(columns={'fake_count': 'count'}),
on=['x1', 'x2'], how='left').fillna(0)
info_cols = ['x1', 'x2', 'display_column_1', 'display_column_2']
for i in range(2):
display_columns_i, bound_lows_i, bound_ups_i = prepared_results[i]['value_display']
percentile_columns_i, percentile_bound_lows_i, percentile_bound_ups_i = prepared_results[i]['percentile_display']
summary_df['display_column_%d' % (i + 1)] = summary_df['x%d' % (i + 1)].apply(lambda x: display_columns_i[int(x)])
if feature_types[i] == 'numeric':
summary_df['value_lower_%d' % (i + 1)] = summary_df['x%d' % (i + 1)].apply(lambda x: bound_lows_i[int(x)])
summary_df['value_upper_%d' % (i + 1)] = summary_df['x%d' % (i + 1)].apply(lambda x: bound_ups_i[int(x)])
info_cols += ['value_lower_%d' % (i + 1), 'value_upper_%d' % (i + 1)]
if len(percentile_columns_i) != 0:
summary_df['percentile_column_%d' % (i + 1)] = summary_df['x%d' % (i + 1)].apply(
lambda x: percentile_columns_i[int(x)])
summary_df['percentile_lower_%d' % (i + 1)] = summary_df['x%d' % (i + 1)].apply(
lambda x: percentile_bound_lows_i[int(x)])
summary_df['percentile_upper_%d' % (i + 1)] = summary_df['x%d' % (i + 1)].apply(
lambda x: percentile_bound_ups_i[int(x)])
info_cols += ['percentile_column_%d' % (i + 1), 'percentile_lower_%d' % (i + 1),
'percentile_upper_%d' % (i + 1)]
return summary_df, info_cols
def _prepare_info_plot_data(feature, feature_type, data, num_grid_points, grid_type, percentile_range,
grid_range, cust_grid_points, show_percentile, show_outliers, endpoint):
"""Prepare data for information plots"""
prepared_results = _prepare_data_x(
feature=feature, feature_type=feature_type, data=data, num_grid_points=num_grid_points, grid_type=grid_type,
percentile_range=percentile_range, grid_range=grid_range, cust_grid_points=cust_grid_points,
show_percentile=show_percentile, show_outliers=show_outliers, endpoint=endpoint)
data_x = prepared_results['data']
display_columns, bound_lows, bound_ups = prepared_results['value_display']
percentile_columns, percentile_bound_lows, percentile_bound_ups = prepared_results['percentile_display']
data_x['fake_count'] = 1
bar_data = data_x.groupby('x', as_index=False).agg({'fake_count': 'count'}).sort_values('x', ascending=True)
summary_df = pd.DataFrame(np.arange(data_x['x'].min(), data_x['x'].max() + 1), columns=['x'])
summary_df = summary_df.merge(bar_data.rename(columns={'fake_count': 'count'}), on='x', how='left').fillna(0)
summary_df['display_column'] = summary_df['x'].apply(lambda x: display_columns[int(x)])
info_cols = ['x', 'display_column']
if feature_type == 'numeric':
summary_df['value_lower'] = summary_df['x'].apply(lambda x: bound_lows[int(x)])
summary_df['value_upper'] = summary_df['x'].apply(lambda x: bound_ups[int(x)])
info_cols += ['value_lower', 'value_upper']
if len(percentile_columns) != 0:
summary_df['percentile_column'] = summary_df['x'].apply(lambda x: percentile_columns[int(x)])
summary_df['percentile_lower'] = summary_df['x'].apply(lambda x: percentile_bound_lows[int(x)])
summary_df['percentile_upper'] = summary_df['x'].apply(lambda x: percentile_bound_ups[int(x)])
info_cols += ['percentile_column', 'percentile_lower', 'percentile_upper']
return data_x, summary_df, info_cols | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/_info_plots.py | _info_plots.py |
from typing import Union
from SeqMetrics import RegressionMetrics, ClassificationMetrics
from SeqMetrics.utils import plot_metrics
from ai4water.backend import easy_mpl as ep
from ai4water.backend import np, pd, mpl, plt, os, wandb, sklearn
from ai4water.utils.utils import AttribtueSetter
from ai4water.utils.utils import get_values
from ai4water.utils.utils import dateandtime_now, ts_features, dict_to_file
from ai4water.utils.visualizations import Plot, init_subplots
from ai4water.utils.visualizations import murphy_diagram, fdc_plot, edf_plot
# competitive skill score plot / bootstrap skill score plot as in MLAir
# rank histogram and reliability diagram for probabilistic forecasting models
# show availability plot of data
# classification report as in yellow brick
# class prediction error as in yellow brick
# discrimination threshold as in yellow brick
# Friedman's H statistic https://blog.macuyiko.com/post/2019/discovering-interaction-effects-in-ensemble-models.html
# silhouette analysis
# KS statistic plot from labels and scores/probabilities
# reliability curves
# cumulative gain
# lift curve
mdates = mpl.dates
# in order to unify the use of metrics
Metrics = {
'regression': lambda t, p, multiclass=False, **kwargs: RegressionMetrics(t, p, **kwargs),
'classification': lambda t, p, multiclass=False, **kwargs: ClassificationMetrics(t, p,
multiclass=multiclass, **kwargs)
}
class ProcessPredictions(Plot):
"""post processing of results after training."""
available_plots = [
'regression', 'prediction', 'residual',
'murphy', 'fdc', 'errors', "edf"
]
def __init__(
self,
mode: str,
forecast_len: int = None,
output_features: Union[list, str] = None,
wandb_config: dict = None,
path: str = None,
dpi: int = 300,
show=1,
save: bool = True,
plots: Union[list, str] = None,
quantiles:int=None,
):
"""
Parameters
----------
mode : str
either "regression" or "classification"
forecast_len : int, optional (default=None)
forecast length, only valid when mode is regression
output_features : str, optional
names of output features
plots : str/list, optional (default=None)
the names of plots to draw. The following plots are available:
``residual``
``regression``
``prediction``
``errors``
``fdc``
``murphy``
``edf``
path : str
folder in which to save the results/plots
show : bool
whether to show the plots or not
save : bool
whether to save the plots or not
wandb_config :
weights and bias configuration dictionary
dpi : int
determines resolution of saved figure
Examples
--------
>>> import numpy as np
>>> from ai4water.postprocessing import ProcessPredictions
>>> true = np.random.random(100)
>>> predicted = np.random.random(100)
>>> processor = ProcessPredictions("regression", forecast_len=1,
... plots=['prediction', 'regression', 'residual'])
>>> processor(true, predicted)
# for postprocessing of classification, we need to set the mode
>>> true = np.random.randint(0, 2, (100, 1))
>>> predicted = np.random.randint(0, 2, (100, 1))
>>> processor = ProcessPredictions("classification")
>>> processor(true, predicted)
"""
self.mode = mode
self.forecast_len = forecast_len
self.output_features = output_features
self.wandb_config = wandb_config
self.quantiles = quantiles
self.show = show
self.save = save
self.dpi = dpi
if plots is None:
if mode == "regression":
plots = ['regression', 'prediction', "residual", "errors", "edf"]
else:
plots = []
elif not isinstance(plots, list):
plots = [plots]
assert all([plot in self.available_plots for plot in plots]), f"""
unrecognized plot(s) in {plots}; allowed plots are {self.available_plots}"""
self.plots = plots
super().__init__(path, save=save)
@property
def quantiles(self):
return self._quantiles
@quantiles.setter
def quantiles(self, x):
self._quantiles = x
def _classes(self, array):
if self.mode == "classification":
return np.unique(array)
return []
def n_classes(self, array):
if self.mode == "classification":
return len(self._classes(array))
return None
def save_or_show(self, show=None, **kwargs):
if show is None:
show = self.show
return super().save_or_show(save=self.save, show=show, **kwargs)
def __call__(
self,
true_outputs,
predicted,
metrics="minimal",
prefix="test",
index=None,
inputs=None,
model=None,
):
if self.quantiles:
return self.process_quantiles(true_outputs, predicted)
# if true_outputs and predicted are dictionaries of length 1, just get the values
true_outputs = get_values(true_outputs)
predicted = get_values(predicted)
true_outputs = np.array(true_outputs)
predicted = np.array(predicted)
AttribtueSetter(self, true_outputs)
key = {"regression": "rgr", "classification": "cls"}
getattr(self, f"process_{key[self.mode]}_results")(
true_outputs,
predicted,
inputs=inputs,
metrics=metrics,
prefix=prefix,
index=index,
)
return
def process_quantiles(self, true, predicted):
#assert self.num_outs == 1
if true.ndim == 2: # todo, this should be avoided
true = np.expand_dims(true, axis=-1)
self.quantiles = self.quantiles
self.plot_quantiles1(true, predicted)
self.plot_quantiles2(true, predicted)
self.plot_all_qs(true, predicted)
return
def horizon_plots(self, errors: dict, fname=''):
plt.close('all')
_, axis = plt.subplots(len(errors), sharex='all')
legends = {'r2': "$R^2$", 'rmse': "RMSE", 'nse': "NSE"}
idx = 0
for metric_name, val in errors.items():
ax = axis[idx]
ax.plot(val, '--o', label=legends.get(metric_name, metric_name))
ax.legend(fontsize=14)
if idx >= len(errors) - 1:
ax.set_xlabel("Horizons", fontsize=14)
ax.set_ylabel(legends.get(metric_name, metric_name), fontsize=14)
idx += 1
self.save_or_show(fname=fname)
return
def plot_results(
self,
true,
predicted: pd.DataFrame,
prefix,
where,
inputs=None,
):
"""
# kwargs can be any/all of followings
# fillstyle:
# marker:
# linestyle:
# markersize:
# color:
"""
for plot in self.plots:
if plot == "murphy":
self.murphy_plot(true, predicted, prefix, where, inputs)
else:
getattr(self, f"{plot}_plot")(true, predicted, prefix, where)
return
def average_target_across_feature(self, true, predicted, feature):
raise NotImplementedError
def prediction_distribution_across_feature(self, true, predicted, feature):
raise NotImplementedError
def edf_plot(self, true, predicted, prefix, where, **kwargs):
"""cumulative distribution function of absolute error between true and
predicted.
Parameters
-----------
true :
array like
predicted :
array like
prefix : str
prefix to use in the name of the saved figure
where : str
sub-directory (relative to the results path) in which to save the figure
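Examples
--------
A minimal sketch with random values (for illustration only):
>>> import numpy as np
>>> pp = ProcessPredictions("regression", forecast_len=1, show=False, save=False)
>>> pp.edf_plot(np.random.random(100), np.random.random(100), "test", "")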
"""
if isinstance(true, (pd.DataFrame, pd.Series)):
true = true.values
if isinstance(predicted, (pd.DataFrame, pd.Series)):
predicted = predicted.values
error = np.abs(true - predicted)
edf_plot(error, xlabel="Absolute Error", show=False)
return self.save_or_show(fname=f"{prefix}_error_dist", where=where)
def murphy_plot(self, true, predicted, prefix, where, inputs, **kwargs):
murphy_diagram(true,
predicted,
reference_model="LinearRegression",
plot_type="diff",
inputs=inputs,
show=False,
**kwargs)
return self.save_or_show(fname=f"{prefix}_murphy", where=where)
def fdc_plot(self, true, predicted, prefix, where, **kwargs):
fdc_plot(predicted, true, show=False, **kwargs)
return self.save_or_show(fname=f"{prefix}_fdc",
where=where)
def residual_plot(
self,
true,
predicted,
prefix,
where,
hist_kws:dict = None,
**kwargs
):
"""
Plots the residuals (true - predicted) against the predictions, along with a histogram of the residuals.
Parameters
----------
true :
pandas DataFrame/Series of true values
predicted :
pandas DataFrame/Series of predicted values
prefix : str
prefix to use in the name of the saved figure
where : str
sub-directory (relative to the results path) in which to save the figure
hist_kws : dict
keyword arguments for the histogram of residuals e.g. ``bins``, ``color``
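Examples
--------
A minimal sketch with random values (for illustration only):
>>> import numpy as np
>>> import pandas as pd
>>> pp = ProcessPredictions("regression", forecast_len=1, show=False, save=False)
>>> t = pd.Series(np.random.random(100))
>>> p = pd.Series(np.random.random(100))
>>> pp.residual_plot(t, p, "test", "")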
"""
fig, axis = plt.subplots(2, sharex="all")
x = predicted.values
y = true.values - predicted.values
_hist_kws = dict(bins=20, linewidth=0.5,
edgecolor="k", grid=False, color='khaki')
if hist_kws is not None:
_hist_kws.update(hist_kws)
ep.hist(y, show=False, ax=axis[0], **_hist_kws)
axis[0].set_xticks([])
ep.plot(x, y, 'o', show=False,
ax=axis[1],
color="darksalmon",
markerfacecolor=np.array([225, 121, 144]) / 256.0,
markeredgecolor="black", markeredgewidth=0.5,
ax_kws=dict(
xlabel="Predicted",
ylabel="Residual",
xlabel_kws={"fontsize": 14},
ylabel_kws={"fontsize": 14}),
)
# draw horizontal line on y=0
axis[1].axhline(0.0)
plt.suptitle("Residual")
return self.save_or_show(fname=f"{prefix}_residual",
where=where)
def errors_plot(self, true, predicted, prefix, where, **kwargs):
errors = Metrics[self.mode](true, predicted, multiclass=self.is_multiclass_)
return plot_metrics(
errors.calculate_all(),
show=self.show,
save_path=os.path.join(self.path, where),
save=self.save,
text_kws = {"fontsize": 16},
max_metrics_per_fig=20,
)
def regression_plot(
self,
true,
predicted,
target_name,
where,
annotate_with="r2"
):
annotation_val = getattr(RegressionMetrics(true, predicted), annotate_with)()
metric_names = {'r2': "$R^2$"}
annotation_key = metric_names.get(annotate_with, annotate_with)
RIDGE_LINE_KWS = {'color': 'firebrick', 'lw': 1.0}
if isinstance(predicted, (pd.DataFrame, pd.Series)):
predicted = predicted.values
marginals = True
if np.isnan(np.array(true)).any() or np.isnan(predicted).any():
marginals = False
# if all the values in predicted are same, calculation of kde gives error
if (predicted == predicted[0]).all():
marginals = False
try:
axes = ep.regplot(true,
predicted,
marker_color='crimson',
line_color='k',
scatter_kws={'marker': "o", 'edgecolors': 'black', 'linewidth':0.5},
show=False,
marginals=marginals,
marginal_ax_pad=0.25,
marginal_ax_size=0.7,
ridge_line_kws=RIDGE_LINE_KWS,
hist=False,
)
except np.linalg.LinAlgError:
axes = ep.regplot(true,
predicted,
marker_color='crimson',
line_color='k',
scatter_kws={'marker': "o", 'edgecolors': 'black', 'linewidth': 0.5},
show=False,
marginals=False
)
axes.annotate(f'{annotation_key}: {round(annotation_val, 3)}',
xy=(0.3, 0.95),
xycoords='axes fraction',
horizontalalignment='right', verticalalignment='top',
fontsize=16)
return self.save_or_show(fname=f"{target_name}_regression",
where=where)
def prediction_plot(self, true, predicted, prefix, where):
mpl.rcParams.update(mpl.rcParamsDefault)
_, axis = init_subplots(width=12, height=8)
# It is quite possible that datetime-indexed data is not equidistant, so
# large parts of the graph would have no data; in that case a line plot
# would create a lot of useless interpolating lines where no data is present.
datetime_axis = False
if isinstance(true.index, pd.DatetimeIndex) and pd.infer_freq(true.index) is not None:
style = '.'
true = true
predicted = predicted
datetime_axis = True
else:
if np.isnan(true.values).sum() > 0:
# For Nan values we should be using this style otherwise nothing is plotted.
style = '.'
else:
style = '-'
true = true.values
predicted = predicted.values
ms = 4 if style == '.' else 2
# when the data is very large, it is better to use a small marker size
if len(true) > 1000:
ms = 2
axis.plot(predicted, style, color='r', label='Prediction')
axis.plot(true, style, color='b', marker='o', fillstyle='none',
markersize=ms, label='True')
axis.legend(loc="best", fontsize=22, markerscale=4)
if datetime_axis:
loc = mdates.AutoDateLocator(minticks=4, maxticks=6)
axis.xaxis.set_major_locator(loc)
fmt = mdates.AutoDateFormatter(loc)
axis.xaxis.set_major_formatter(fmt)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("Time", fontsize=18)
return self.save_or_show(fname=f"{prefix}_prediction", where=where)
def plot_all_qs(self, true_outputs, predicted, save=False):
plt.close('all')
plt.style.use('ggplot')
st, en = 0, true_outputs.shape[0]
plt.plot(np.arange(st, en), true_outputs[st:en, 0], label="True", color='navy')
for idx, q in enumerate(self.quantiles):
q_name = "{:.1f}".format(q * 100)
plt.plot(np.arange(st, en), predicted[st:en, idx], label="q {} %".format(q_name))
plt.legend(loc="best")
self.save_or_show(save, fname="all_quantiles", where='results')
return
def plot_quantiles1(self, true_outputs, predicted, st=0, en=None, save=True):
plt.close('all')
plt.style.use('ggplot')
assert true_outputs.shape[-2:] == (1, 1)
if en is None:
en = true_outputs.shape[0]
for q in range(len(self.quantiles) - 1):
st_q = "{:.1f}".format(self.quantiles[q] * 100)
en_q = "{:.1f}".format(self.quantiles[-q] * 100)
plt.plot(np.arange(st, en), true_outputs[st:en, 0], label="True",
color='navy')
plt.fill_between(np.arange(st, en), predicted[st:en, q].reshape(-1, ),
predicted[st:en, -q].reshape(-1, ), alpha=0.2,
color='g', edgecolor=None, label=st_q + '_' + en_q)
plt.legend(loc="best")
self.save_or_show(save, fname='q' + st_q + '_' + en_q, where='results')
return
def plot_quantiles2(
self, true_outputs,
predicted,
st=0,
en=None,
save=True
):
plt.close('all')
plt.style.use('ggplot')
if en is None:
en = true_outputs.shape[0]
for q in range(len(self.quantiles) - 1):
st_q = "{:.1f}".format(self.quantiles[q] * 100)
en_q = "{:.1f}".format(self.quantiles[q + 1] * 100)
plt.plot(np.arange(st, en), true_outputs[st:en, 0], label="True",
color='navy')
plt.fill_between(np.arange(st, en),
predicted[st:en, q].reshape(-1, ),
predicted[st:en, q + 1].reshape(-1, ),
alpha=0.2,
color='g', edgecolor=None, label=st_q + '_' + en_q)
plt.legend(loc="best")
self.save_or_show(save, fname='q' + st_q + '_' + en_q + ".png",
where='results')
return
def plot_quantile(self, true_outputs, predicted, min_q: int, max_q, st=0,
en=None, save=False):
plt.close('all')
plt.style.use('ggplot')
if en is None:
en = true_outputs.shape[0]
q_name = "{:.1f}_{:.1f}_{}_{}".format(self.quantiles[min_q] * 100,
self.quantiles[max_q] * 100, str(st),
str(en))
plt.plot(np.arange(st, en), true_outputs[st:en, 0], label="True", color='navy')
plt.fill_between(np.arange(st, en),
predicted[st:en, min_q].reshape(-1, ),
predicted[st:en, max_q].reshape(-1, ),
alpha=0.2,
color='g', edgecolor=None, label=q_name + ' %')
plt.legend(loc="best")
self.save_or_show(save, fname="q_" + q_name + ".png", where='results')
return
def roc_curve(self, estimator, x, y, prefix=None):
if hasattr(estimator, '_model'):
if estimator._model.__class__.__name__ in ["XGBClassifier", "XGBRFClassifier"] and isinstance(x,
np.ndarray):
x = pd.DataFrame(x, columns=estimator.input_features)
plot_roc_curve(estimator, x, y.reshape(-1, ))
self.save_or_show(fname=f"{prefix}_roc")
return
def confusion_matrix(self, true, predicted, prefix=None, cmap="Blues", **kwargs):
"""plots confusion matrix.
cmap :
**kwargs :
any keyword arguments for imshow
"""
cm = ClassificationMetrics(
true,
predicted,
multiclass=self.is_multiclass_).confusion_matrix()
kws = {
'annotate': True,
'colorbar': True,
'cmap': cmap,
'xticklabels': self.classes_,
'yticklabels': self.classes_,
'ax_kws': {'xlabel': "Predicted Label",
'ylabel': "True Label"},
'show': False,
'annotate_kws': {'fontsize': 14, "fmt": '%.f', 'ha':"left"}
}
kws.update(kwargs)
ep.imshow(cm, **kws)
self.save_or_show(fname=f"{prefix}_confusion_matrix")
return
def precision_recall_curve(self, estimator, x, y, prefix=None):
if hasattr(estimator, '_model'):
if estimator._model.__class__.__name__ in ["XGBClassifier", "XGBRFClassifier"] and isinstance(x,
np.ndarray):
x = pd.DataFrame(x, columns=estimator.input_features)
plot_precision_recall_curve(estimator, x, y.reshape(-1, ))
self.save_or_show(fname=f"{prefix}_plot_precision_recall_curve")
return
def process_rgr_results(
self,
true: np.ndarray,
predicted: np.ndarray,
metrics="minimal",
prefix=None,
index=None,
remove_nans=True,
inputs=None,
):
"""
predicted, true are arrays of shape (examples, outs, forecast_len).
"""
# if user_defined_data:
if self.output_features is None:
# when data is user_defined, we don't know what out_cols, and forecast_len are
if predicted.size == len(predicted):
out_cols = ['output']
forecast_len = 1
else:
out_cols = [f'output_{i}' for i in range(predicted.shape[-1])]
forecast_len = 1
true, predicted = self.maybe_not_3d_data(true, predicted)
else:
# for cases if they are 2D/1D, add the third dimension.
true, predicted = self.maybe_not_3d_data(true, predicted)
forecast_len = self.forecast_len
if isinstance(forecast_len, dict):
forecast_len = np.unique(list(forecast_len.values())).item()
out_cols = self.output_features
if isinstance(out_cols, dict):
_out_cols = []
for cols in out_cols.values():
_out_cols = _out_cols + cols
out_cols = _out_cols
if len(out_cols) > 1 and not isinstance(predicted, np.ndarray):
raise NotImplementedError("""
cannot process results with more than one output array""")
for idx, out in enumerate(out_cols):
horizon_errors = {metric_name: [] for metric_name in ['nse', 'rmse']}
for h in range(forecast_len):
errs = dict()
fpath = os.path.join(self.path, out)
if not os.path.exists(fpath):
os.makedirs(fpath)
t = pd.DataFrame(true[:, idx, h], index=index, columns=['true_' + out])
p = pd.DataFrame(predicted[:, idx, h], index=index, columns=['pred_' + out])
if wandb is not None and self.wandb_config is not None:
_wandb_scatter(t.values, p.values, out)
df = pd.concat([t, p], axis=1)
df = df.sort_index()
fname = f"{prefix}_{out}_{h}"
df.to_csv(os.path.join(fpath, fname + ".csv"), index_label='index')
self.plot_results(t, p, prefix=fname, where=out, inputs=inputs)
if remove_nans:
nan_idx = np.isnan(t)
t = t.values[~nan_idx]
p = p.values[~nan_idx]
errors = RegressionMetrics(t, p)
errs[out + '_errors_' + str(h)] = getattr(errors, f'calculate_{metrics}')()
errs[out + 'true_stats_' + str(h)] = ts_features(t)
errs[out + 'predicted_stats_' + str(h)] = ts_features(p)
dict_to_file(fpath, errors=errs, name=prefix)
for p in horizon_errors.keys():
horizon_errors[p].append(getattr(errors, p)())
if forecast_len > 1:
self.horizon_plots(horizon_errors, f'{prefix}_{out}_horizons.png')
return
def process_cls_results(
self,
true: np.ndarray,
predicted: np.ndarray,
metrics="minimal",
prefix=None,
index=None,
inputs=None,
model=None,
):
"""post-processes classification results."""
if self.is_multilabel_:
return self.process_multilabel(true, predicted, metrics, prefix, index)
if self.is_multiclass_:
return self.process_multiclass(true, predicted, metrics, prefix, index)
else:
return self.process_binary(true, predicted, metrics, prefix, index, model=None)
def process_multilabel(self, true, predicted, metrics, prefix, index):
for label in range(true.shape[1]):
if self.n_classes(true[:, label]) == 2:
self.process_binary(true[:, label], predicted[:, label], metrics, f"{prefix}_{label}", index)
else:
self.process_multiclass(true[:, label], predicted[:, label], metrics, f"{prefix}_{label}", index)
return
def process_multiclass(self, true, predicted, metrics, prefix, index):
if len(predicted) == predicted.size:
predicted = predicted.reshape(-1, 1)
else:
predicted = np.argmax(predicted, axis=1).reshape(-1, 1)
if len(true) == true.size:
true = true.reshape(-1, 1)
else:
true = np.argmax(true, axis=1).reshape(-1, 1)
if self.output_features is None:
self.output_features = [f'feature_{i}' for i in range(self.n_classes(true))]
self.confusion_matrix(true, predicted, prefix=prefix)
fname = os.path.join(self.path, f"{prefix}_prediction.csv")
pd.DataFrame(np.concatenate([true, predicted], axis=1),
columns=['true', 'predicted'], index=index).to_csv(fname)
class_metrics = ClassificationMetrics(true, predicted, multiclass=True)
dict_to_file(self.path,
errors=class_metrics.calculate_all(),
name=f"{prefix}_{dateandtime_now()}.json")
return
def process_binary(self, true, predicted, metrics, prefix, index, model=None):
assert self.n_classes(true) == 2
if model is not None:
try: # todo, also plot for DL
self.precision_recall_curve(model, x=true, y=predicted, prefix=prefix)
self.roc_curve(model, x=true, y=predicted, prefix=prefix)
except NotImplementedError:
pass
if predicted.ndim == 1:
predicted = predicted.reshape(-1, 1)
elif predicted.size != len(predicted):
predicted = np.argmax(predicted, axis=1).reshape(-1, 1)
if true.ndim == 1:
true = true.reshape(-1, 1)
elif true.size != len(true):
true = np.argmax(true, axis=1).reshape(-1, 1)
self.confusion_matrix(true, predicted, prefix=prefix)
fpath = os.path.join(self.path, prefix)
if not os.path.exists(fpath):
os.makedirs(fpath)
metrics_instance = ClassificationMetrics(true, predicted, multiclass=False)
metrics = getattr(metrics_instance, f"calculate_{metrics}")()
dict_to_file(fpath,
errors=metrics,
name=f"{prefix}_{dateandtime_now()}.json"
)
fname = os.path.join(fpath, f"{prefix}_.csv")
array = np.concatenate([true.reshape(-1, 1), predicted.reshape(-1, 1)], axis=1)
pd.DataFrame(array, columns=['true', 'predicted'], index=index).to_csv(fname)
return
def maybe_not_3d_data(self, true, predicted):
forecast_len = self.forecast_len
if true.ndim < 3:
if isinstance(forecast_len, dict):
forecast_len = set(list(forecast_len.values()))
assert len(forecast_len) == 1
forecast_len = forecast_len.pop()
assert forecast_len == 1, f'{forecast_len}'
axis = 2 if true.ndim == 2 else (1, 2)
true = np.expand_dims(true, axis=axis)
if predicted.ndim < 3:
assert forecast_len == 1
axis = 2 if predicted.ndim == 2 else (1, 2)
predicted = np.expand_dims(predicted, axis=axis)
return true, predicted
def plot_roc_curve(*args, **kwargs):
try:
func = sklearn.metrics.RocCurveDisplay.from_estimator
except AttributeError:
func = sklearn.metrics.plot_roc_curve
return func(*args, **kwargs)
def plot_precision_recall_curve(*args, **kwargs):
try:
func = sklearn.metrics.PrecisionRecallDisplay.from_estimator
except AttributeError:
func = sklearn.metrics.plot_precision_recall_curve
return func(*args, **kwargs)
def _wandb_scatter(true: np.ndarray, predicted: np.ndarray, name: str) -> None:
"""Adds a scatter plot on wandb."""
data = [[x, y] for (x, y) in zip(true.reshape(-1, ), predicted.reshape(-1, ))]
table = wandb.Table(data=data, columns=["true", "predicted"])
wandb.log({
"scatter_plot": wandb.plot.scatter(table, "true", "predicted",
title=name)
})
return | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/_process_predictions.py | _process_predictions.py |
from itertools import zip_longest
from ai4water.backend import np, mpl, plt
from ai4water.utils.visualizations import Plot
from ai4water.utils.utils import create_subplots
class LossCurve(Plot):
def __init__(self, path=None, show=1, save:bool=True):
self.path = path
self.show = show
super().__init__(path, save=save)
def plot_loss(self,
history: dict,
name="loss_curve",
figsize:tuple=None)->plt.Axes:
"""Considering history is a dictionary of different arrays, possible
training and validation loss arrays, this method plots those arrays."""
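Examples
--------
A minimal sketch with made-up loss values (for illustration only):
>>> from ai4water.postprocessing.utils import LossCurve
>>> history = {'loss': [1.0, 0.8, 0.6], 'val_loss': [1.2, 0.9, 0.7]}
>>> ax = LossCurve(show=False, save=False).plot_loss(history)
"""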
plt.clf()
plt.close('all')
plt.style.use('ggplot')
legends = {
'mean_absolute_error': 'Mean Absolute Error',
'mape': 'Mean Absolute Percentage Error',
'mean_squared_logarithmic_error': 'Mean Squared Logarithmic Error',
'pbias': "Percent Bias",
"nse": "Nash-Sutcliff Efficiency",
"kge": "Kling-Gupta Efficiency",
"tf_r2": "$R^{2}$",
"r2": "$R^{2}$"
}
epochs = range(1, len(history['loss']) + 1)
nplots = len(history)
val_losses = {}
losses = history.copy()
for k in history.keys():
val_key = f"val_{k}"
if val_key in history:
nplots -= 1
val_losses[val_key] = losses.pop(val_key)
fig, axis = create_subplots(nplots, figsize=figsize)
if not isinstance(axis, np.ndarray):
axis = np.array([axis])
axis = axis.flat
for idx, (key, loss), val_data in zip_longest(range(nplots), losses.items(), val_losses.items()):
ax = axis[idx]
if val_data is not None:
val_key, val_loss = val_data
ax.plot(epochs, val_loss, color=[0.96707953, 0.46268314, 0.45772886],
label='Validation ')
ax.legend()
ax.plot(epochs, loss, color=[0.13778617, 0.06228198, 0.33547859],
label='Training ')
ax.legend()
ax.set_xlabel("Epochs")
ax.set_ylabel(legends.get(key, key))
ax.set(frame_on=True)
self.save_or_show(fname=name, show=self.show)
mpl.rcParams.update(mpl.rcParamsDefault)
return ax
def choose_examples(x, examples_to_use, y=None):
"""Chooses examples from x and y"""
if isinstance(examples_to_use, int):
x = x[examples_to_use]
x = np.expand_dims(x, 0) # dimension must not decrease
index = np.array([examples_to_use])
elif isinstance(examples_to_use, float):
assert examples_to_use < 1.0
# randomly choose x fraction from test_x
x, index = choose_n_imp_exs(x, int(examples_to_use * len(x)), y)
elif hasattr(examples_to_use, '__len__'):
index = np.array(examples_to_use)
x = x[index]
else:
raise ValueError(f"unrecognized value of examples_to_use: {examples_to_use}")
return x, index
def choose_n_imp_exs(x: np.ndarray, n: int, y=None):
"""Chooses the n important examples from x and y"""
n = min(len(x), n)
st = n // 2
en = n - st
if y is None:
idx = np.random.randint(0, len(x), n)
else:
st = np.argsort(y, axis=0)[0:st].reshape(-1, )
en = np.argsort(y, axis=0)[-en:].reshape(-1, )
idx = np.hstack([st, en])
x = x[idx]
return x, idx | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/utils.py | utils.py |
import importlib
from typing import Callable, Union
from SALib.plotting.hdmr import plot
from SALib.plotting.bar import plot as barplot
from SALib.plotting.morris import covariance_plot
from ai4water.backend import easy_mpl as ep
from ai4water.backend import np, pd, plt, os
def sensitivity_analysis(
sampler:str,
analyzer:Union[str, list],
func:Callable,
bounds: list,
sampler_kwds: dict = None,
analyzer_kwds: dict = None,
names: list = None,
**kwargs
)->dict:
"""
Parameters
----------
sampler : str
name of the sampling module from SALib.sample e.g. "morris" or "fast_sampler"
analyzer : str/list
name(s) of the analyzing module(s) from SALib.analyze e.g. "morris" or "sobol"
func : Callable
the function/model whose output sensitivity is analyzed. It must accept the
sampled array as the keyword argument ``x`` and return a 1-dimensional array.
bounds : list
lower and upper bounds for each input variable
sampler_kwds : dict, optional
keyword arguments for the sampler's ``sample`` method (default ``{'N': 100}``)
analyzer_kwds : dict, optional
keyword arguments for the analyzer's ``analyze`` method
names : list, optional
names of the input variables; if not given, "Feat0", "Feat1", ... are used
**kwargs :
any additional keyword arguments for ``func``
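Examples
--------
A minimal sketch (assuming SALib is installed; the function and bounds are
made up for illustration):
>>> def model_func(x):
...     return x[:, 0] + 2.0 * x[:, 1]
>>> results = sensitivity_analysis("morris", "morris", model_func,
...     bounds=[[0.0, 1.0], [0.0, 1.0]], names=["a", "b"])
total samples: 300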
"""
sampler = importlib.import_module(f"SALib.sample.{sampler}")
if names is None:
names = [f"Feat{i}" for i in range(len(bounds))]
# Define the model inputs
problem = {
'num_vars': len(bounds),
'names': names,
'bounds': bounds
}
sampler_kwds = sampler_kwds or {'N': 100}
param_values = sampler.sample(problem=problem, **sampler_kwds)
print("total samples:", len(param_values))
y = func(x=param_values, **kwargs)
y = np.array(y)
assert np.size(y) == len(y) , f"output must be 1 dimensional"
y = y.reshape(-1, )
results = {}
if isinstance(analyzer, list):
for _analyzer in analyzer:
print(f"Analyzing with {_analyzer}")
results[_analyzer] = analyze(_analyzer, param_values, y, problem, analyzer_kwds)
else:
assert isinstance(analyzer, str)
results[analyzer] = analyze(analyzer, param_values, y, problem, analyzer_kwds)
return results
def analyze(analyzer, param_values, y, problem, analyzer_kwds):
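"""Runs the SALib analyzer named by ``analyzer`` on the sampled inputs
(``param_values``) and outputs (``y``) for the given ``problem`` and returns
the SALib results object."""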
_analyzer = importlib.import_module(f"SALib.analyze.{analyzer}")
analyzer_kwds = analyzer_kwds or {}
if analyzer in ["hdmr",
"morris",
"dgsm",
"ff",
"pawn",
"rbd_fast", "delta",
] and 'X' not in analyzer_kwds:
analyzer_kwds['X'] = param_values
Si = _analyzer.analyze(problem=problem, Y=y, **analyzer_kwds)
if 'X' in analyzer_kwds:
analyzer_kwds.pop('X')
return Si
def sensitivity_plots(analyzer, si, path=None, show=False):
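"""Draws the analyzer-specific plots from the SALib results object ``si`` and
optionally saves them to ``path`` and/or shows them."""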
if analyzer == "morris":
morris_plots(si, path=path, show=show)
elif analyzer in ["sobol"]:
sobol_plots(si, show, path)
elif analyzer == "hdmr":
plt.close('all')
plot(si)
if path:
plt.savefig(os.path.join(path, "hdmr"), bbox_inches="tight")
elif analyzer in ["pawn"]:
plt.close('all')
si_df = si.to_df()
bar_plot(si_df[["CV", "median"]], conf_col="median")
if path:
plt.savefig(os.path.join(path, "pawn_cv"), bbox_inches="tight")
if show:
plt.show()
elif analyzer == "fast":
plt.close('all')
si_df = si.to_df()
bar_plot(si_df[["S1", "S1_conf"]])
if path:
plt.savefig(os.path.join(path, "fast_s1"), bbox_inches="tight")
if show:
plt.show()
plt.close('all')
bar_plot(si_df[["ST", "ST_conf"]])
if path:
plt.savefig(os.path.join(path, "fast_s1"), bbox_inches="tight")
if show:
plt.show()
elif analyzer == "delta":
plt.close('all')
si_df = si.to_df()
bar_plot(si_df[["delta", "delta_conf"]])
if path:
plt.savefig(os.path.join(path, "fast_s1"), bbox_inches="tight")
if show:
plt.show()
plt.close('all')
bar_plot(si_df[["S1", "S1_conf"]])
if path:
plt.savefig(os.path.join(path, "fast_s1"), bbox_inches="tight")
if show:
plt.show()
elif analyzer == "rbd_fast":
plt.close('all')
si_df = si.to_df()
bar_plot(si_df[["S1", "S1_conf"]])
if path:
plt.savefig(os.path.join(path, "rbd_fast_s1"), bbox_inches="tight")
if show:
plt.show()
return
def sobol_plots(si, show=False, path:str=None):
total, first, second = si.to_df()
plt.close('all')
bar_plot(total)
if path:
plt.savefig(os.path.join(path, "total"), bbox_inches="tight")
if show:
plt.show()
plt.close('all')
bar_plot(first)
if path:
plt.savefig(os.path.join(path, "first_order"), bbox_inches="tight")
if show:
plt.show()
fig, ax = plt.subplots(figsize=(16, 6))
bar_plot(second, ax=ax)
if path:
plt.savefig(os.path.join(path, "first_order"), bbox_inches="tight")
if show:
plt.show()
return
def morris_plots(si, show:bool=False, path:str=None, annotate=True):
plt.close('all')
si_df = si.to_df()
bar_plot(si_df[["mu_star", "mu_star_conf"]])
if show:
plt.show()
if path:
plt.savefig(os.path.join(path, "morris_bar_plot"), bbox_inches="tight")
fig, ax = plt.subplots()
covariance_plot(ax, si)
if si['sigma'] is not None and annotate:
y = si['sigma']
z = si['mu_star']
for i, txt in enumerate(si['names']):
ax.annotate(txt, (z[i], y[i]))
if show:
plt.show()
if path:
plt.savefig(os.path.join(path, "covariance_plot"), bbox_inches="tight")
    plt.close('all')
    # the previous axes were closed above, so create a fresh figure for the SALib bar plot
    fig, ax = plt.subplots()
    barplot(si_df, ax=ax)
if path:
plt.savefig(os.path.join(path, "morris_bar_plot_all"), bbox_inches="tight")
if show:
plt.show()
return
def bar_plot(sis_df:pd.DataFrame, sort=True, conf_col = "_conf", **kwargs):
conf_cols = sis_df.columns.str.contains(conf_col)
sis = sis_df.loc[:, ~conf_cols].values
confs = sis_df.loc[:, conf_cols].values
names = sis_df.index
if isinstance(names[0], tuple):
names = np.array([str(i) for i in names])
if len(sis) == sis.size:
confs = confs.reshape(-1, )
sis = sis.reshape(-1,)
else:
raise ValueError
if sort:
sort_idx = np.argsort(sis)
confs = confs[sort_idx]
sis = sis[sort_idx]
names = names[sort_idx]
label = sis_df.columns[~conf_cols][0]
ax = ep.bar_chart(sis, names, orient="v", sort=sort, rotation=90, show=False,
label=label, **kwargs)
if sort:
ax.legend(loc="upper left")
else:
ax.legend(loc="best")
ax.errorbar(np.arange(len(sis)), sis, yerr=confs, fmt=".", color="black")
return ax
def _show_save(path=None, show=True):
if path:
plt.savefig(path, bbox_inches="tight")
if show:
plt.show()
return
def _make_predict_func(model, **kwargs):
from ai4water.preprocessing import DataSet
lookback = model.config["ts_args"]['lookback']
def func(x):
x = pd.DataFrame(x, columns=model.input_features)
ds = DataSet(data=x,
ts_args=model.config["ts_args"],
input_features=model.input_features,
train_fraction=1.0,
val_fraction=0.0,
verbosity=0)
x, _ = ds.training_data()
p = model.predict(x=x, **kwargs)
return np.concatenate([p, np.zeros((lookback-1, 1))])
return func | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/_sa.py | _sa.py |
import json
import warnings
import numpy as np
from typing import Union
from ai4water.utils.utils import ts_features
from .utils import msg
# TODO remove repeated calculation of mse, std, mean etc
# TODO make weights, class attribute
# TODO write tests
# TODO standardized residual sum of squares
# http://documentation.sas.com/?cdcId=fscdc&cdcVersion=15.1&docsetId=fsug&docsetTarget=n1sm8nk3229ttun187529xtkbtpu.htm&locale=en
# https://arxiv.org/ftp/arxiv/papers/1809/1809.03006.pdf
# https://www.researchgate.net/profile/Mark-Tschopp/publication/322147437_Quantifying_Similarity_and_Distance_Measures_for_Vector-Based_Datasets_Histograms_Signals_and_Probability_Distribution_Functions/links/5a48089ca6fdcce1971c8142/Quantifying-Similarity-and-Distance-Measures-for-Vector-Based-Datasets-Histograms-Signals-and-Probability-Distribution-Functions.pdf
# maximum absolute error
# relative mean absolute error
# relative rmse
# Log mse
# Jeffreys Divergence
# kullback-Leibler divergence
# Peak flow ratio https://hess.copernicus.org/articles/24/869/2020/
# Legates׳s coefficient of efficiency
# outliear percentage : pysteps
# mean squared error skill score, mean absolute error skill score, https://doi.org/10.1016/j.ijforecast.2018.11.010
# root mean quartic error, Kolmogorov–Smirnov test integral, OVERPer, Rényi entropy,
# 95th percentile: https://doi.org/10.1016/j.solener.2014.10.016
# Friedman test: https://doi.org/10.1016/j.solener.2014.10.016
EPS = 1e-10 # epsilon
# TODO classification metrics
# cross entropy
# TODO probability losses
# log normal loss
# skill score
# TODO multi horizon metrics
class Metrics(object):
warnings.warn(msg("Metrics"), UserWarning)
"""
    This class does some pre-processing and handles metadata regarding true and
predicted arrays.
The arguments other than `true` and `predicted` are dynamic i.e. they can be
changed from outside the class. This means the user can change their value after
creating the class. This will be useful if we want to calculate an error once by
ignoring NaN and then by not ignoring the NaNs. However, the user has to run
    the method `treat_values` in order for the changed values to take effect on true and
predicted arrays.
Literature:
https://www-miklip.dkrz.de/about/murcss/
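    Example:
        A hedged, illustrative sketch of the intended workflow, using the
        RegressionMetrics subclass defined in this sub-package:
        >>> import numpy as np
        >>> from ai4water.postprocessing.SeqMetrics import RegressionMetrics
        >>> t = np.array([1.0, np.nan, 3.0, 4.0])
        >>> p = np.array([1.1, 2.0, 2.9, 4.2])
        >>> metrics = RegressionMetrics(t, p, replace_nan=0.0)
        >>> metrics.treat_values()  # apply the NaN replacement before computing errors
        >>> rmse = metrics.rmse()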
"""
def __init__(self,
true: Union[np.ndarray, list],
predicted: Union[np.ndarray, list],
replace_nan: Union[int, float, None] = None,
replace_inf: Union[int, float, None] = None,
remove_zero: bool = False,
remove_neg: bool = False,
metric_type: str = 'regression'
):
"""
Arguments:
            true : array like, true/observed/actual/target values
predicted : array like, simulated values
replace_nan : default None. if not None, then NaNs in true
and predicted will be replaced by this value.
            replace_inf : default None, if not None, then inf values in true and
predicted will be replaced by this value.
remove_zero : default False, if True, the zero values in true
or predicted arrays will be removed. If a zero is found in one
array, the corresponding value in the other array will also be
removed.
remove_neg : default False, if True, the negative values in true
or predicted arrays will be removed.
metric_type : type of metric.
"""
self.metric_type = metric_type
self.true, self.predicted = self._pre_process(true, predicted)
self.replace_nan = replace_nan
self.replace_inf = replace_inf
self.remove_zero = remove_zero
self.remove_neg = remove_neg
@staticmethod
def _minimal() -> list:
raise NotImplementedError
@staticmethod
def _scale_independent_metrics() -> list:
raise NotImplementedError
@staticmethod
def _scale_dependent_metrics() -> list:
raise NotImplementedError
@property
def replace_nan(self):
return self._replace_nan
@replace_nan.setter
def replace_nan(self, x):
self._replace_nan = x
@property
def replace_inf(self):
return self._replace_inf
@replace_inf.setter
def replace_inf(self, x):
self._replace_inf = x
@property
def remove_zero(self):
return self._remove_zero
@remove_zero.setter
def remove_zero(self, x):
self._remove_zero = x
@property
def remove_neg(self):
return self._remove_neg
@remove_neg.setter
def remove_neg(self, x):
self._remove_neg = x
@property
def assert_greater_than_one(self):
        # assert that both true and predicted arrays have a length greater than one.
if len(self.true) <= 1 or len(self.predicted) <= 1:
raise ValueError(f"""Expect length of true and predicted arrays to be larger than 1 but they are
{len(self.true)} and {len(self.predicted)}""")
return
def _pre_process(self, true, predicted):
predicted = self._assert_1darray(predicted)
true = self._assert_1darray(true)
assert len(predicted) == len(true), "lengths of provided arrays mismatch, predicted array: {}, true array: {}" \
.format(len(predicted), len(true))
return true, predicted
def _assert_1darray(self, array_like) -> np.ndarray:
"""Makes sure that the provided `array_like` is 1D numpy array"""
if not isinstance(array_like, np.ndarray):
if not isinstance(array_like, list):
                # it can be pandas series or dataframe
if array_like.__class__.__name__ in ['Series', 'DataFrame']:
if len(array_like.shape) > 1: # 1d series has shape (x,) while 1d dataframe has shape (x,1)
                        if array_like.shape[1] > 1:  # it is a 2d dataframe
raise TypeError("only 1d pandas Series or dataframe are allowed")
np_array = np.array(array_like).reshape(-1, )
else:
raise TypeError(f"all inputs must be numpy array or list but one is of type {type(array_like)}")
else:
np_array = np.array(array_like).reshape(-1, )
else:
if np.ndim(array_like) > 1:
sec_dim = array_like.shape[1]
if self.metric_type != 'classification' and sec_dim > 1:
raise ValueError(f"Array must not be 2d but it has shape {array_like.shape}")
np_array = np.array(array_like).reshape(-1, ) if self.metric_type != 'classification' else array_like
else:
                # the array is already 1d; flatten it just to be safe
np_array = array_like.reshape(-1, ) if self.metric_type != 'classification' else array_like
if self.metric_type != 'classification':
assert len(np_array.shape) == 1
return np_array
def calculate_all(self, statistics=False, verbose=False, write=False, name=None) -> dict:
""" calculates errors using all available methods except brier_score..
write: bool, if True, will write the calculated errors in file.
name: str, if not None, then must be path of the file in which to write."""
errors = {}
for m in self.all_methods:
if m not in ["brier_score"]:
try:
error = float(getattr(self, m)())
# some errors might not have been computed and returned a non float-convertible value e.g. None
except TypeError:
error = getattr(self, m)()
errors[m] = error
if verbose:
if error is None:
print('{0:25} : {1}'.format(m, error))
else:
print('{0:25} : {1:<12.3f}'.format(m, error))
if statistics:
errors['stats'] = self.stats(verbose=verbose)
if write:
if name is not None:
assert isinstance(name, str)
fname = name
else:
fname = 'errors'
with open(fname + ".json", 'w') as fp:
json.dump(errors, fp, sort_keys=True, indent=4)
return errors
def calculate_minimal(self) -> dict:
"""
Calculates some basic metrics.
Returns
-------
dict
Dictionary with all metrics
"""
metrics = {}
for metric in self._minimal():
metrics[metric] = getattr(self, metric)()
return metrics
def _error(self, true=None, predicted=None):
""" simple difference """
if true is None:
true = self.true
if predicted is None:
predicted = self.predicted
return true - predicted
def _percentage_error(self):
"""
Percentage error
"""
return self._error() / (self.true + EPS) * 100
def _naive_prognose(self, seasonality: int = 1):
""" Naive forecasting method which just repeats previous samples """
return self.true[:-seasonality]
def _relative_error(self, benchmark: np.ndarray = None):
""" Relative Error """
if benchmark is None or isinstance(benchmark, int):
# If no benchmark prediction provided - use naive forecasting
if not isinstance(benchmark, int):
seasonality = 1
else:
seasonality = benchmark
return self._error(self.true[seasonality:], self.predicted[seasonality:]) / \
(self._error(self.true[seasonality:], self._naive_prognose(seasonality)) + EPS)
return self._error() / (self._error(self.true, benchmark) + EPS)
def _bounded_relative_error(self, benchmark: np.ndarray = None):
""" Bounded Relative Error """
if benchmark is None or isinstance(benchmark, int):
# If no benchmark prediction provided - use naive forecasting
if not isinstance(benchmark, int):
seasonality = 1
else:
seasonality = benchmark
abs_err = np.abs(self._error(self.true[seasonality:], self.predicted[seasonality:]))
abs_err_bench = np.abs(self._error(self.true[seasonality:], self._naive_prognose(seasonality)))
else:
abs_err = np.abs(self._error())
            abs_err_bench = np.abs(self._error(self.true, benchmark))
return abs_err / (abs_err + abs_err_bench + EPS)
def _ae(self):
"""Absolute error """
return np.abs(self.true - self.predicted)
def calculate_scale_independent_metrics(self) -> dict:
"""
Calculates scale independent metrics
Returns
-------
dict
Dictionary with all metrics
"""
metrics = {}
for metric in self._scale_independent_metrics():
metrics[metric] = getattr(self, metric)()
return metrics
def calculate_scale_dependent_metrics(self) -> dict:
"""
Calculates scale dependent metrics
Returns
-------
dict
Dictionary with all metrics
"""
metrics = {}
for metric in self._scale_dependent_metrics():
metrics[metric] = getattr(self, metric)()
return metrics
def scale_dependent_metrics(self):
pass
def stats(self, verbose: bool = False) -> dict:
""" returs some important stats about true and predicted values."""
_stats = dict()
_stats['true'] = ts_features(self.true)
_stats['predicted'] = ts_features(self.predicted)
if verbose:
print("\nName True Predicted ")
print("----------------------------------------")
for k in _stats['true'].keys():
print("{:<25}, {:<10}, {:<10}"
                      .format(k, round(_stats['true'][k], 4), round(_stats['predicted'][k], 4)))
return _stats
def percentage_metrics(self):
pass
def relative_metrics(self):
pass
def composite_metrics(self):
pass
def treat_values(self):
"""
This function is applied by default at the start/at the time of initiating
        the class. However, it can be used any time after that. This can be handy
        if we want to calculate an error first by ignoring nan and then by not
        ignoring nan.
Adopting from https://github.com/BYU-Hydroinformatics/HydroErr/blob/master/HydroErr/HydroErr.py#L6210
Removes the nan, negative, and inf values in two numpy arrays
"""
sim_copy = np.copy(self.predicted)
obs_copy = np.copy(self.true)
# Treat missing data in observed_array and simulated_array, rows in simulated_array or
# observed_array that contain nan values
all_treatment_array = np.ones(obs_copy.size, dtype=bool)
if np.any(np.isnan(obs_copy)) or np.any(np.isnan(sim_copy)):
if self.replace_nan is not None:
# Finding the NaNs
sim_nan = np.isnan(sim_copy)
obs_nan = np.isnan(obs_copy)
# Replacing the NaNs with the input
sim_copy[sim_nan] = self.replace_nan
obs_copy[obs_nan] = self.replace_nan
warnings.warn("Elements(s) {} contained NaN values in the simulated array and "
"elements(s) {} contained NaN values in the observed array and have been "
"replaced (Elements are zero indexed).".format(np.where(sim_nan)[0],
np.where(obs_nan)[0]),
UserWarning)
else:
# Getting the indices of the nan values, combining them, and informing user.
nan_indices_fcst = ~np.isnan(sim_copy)
nan_indices_obs = ~np.isnan(obs_copy)
all_nan_indices = np.logical_and(nan_indices_fcst, nan_indices_obs)
all_treatment_array = np.logical_and(all_treatment_array, all_nan_indices)
warnings.warn("Row(s) {} contained NaN values and the row(s) have been "
"removed (Rows are zero indexed).".format(np.where(~all_nan_indices)[0]),
UserWarning)
if np.any(np.isinf(obs_copy)) or np.any(np.isinf(sim_copy)):
if self.replace_nan is not None:
# Finding the NaNs
sim_inf = np.isinf(sim_copy)
obs_inf = np.isinf(obs_copy)
# Replacing the NaNs with the input
sim_copy[sim_inf] = self.replace_inf
obs_copy[obs_inf] = self.replace_inf
warnings.warn("Elements(s) {} contained Inf values in the simulated array and "
"elements(s) {} contained Inf values in the observed array and have been "
"replaced (Elements are zero indexed).".format(np.where(sim_inf)[0],
np.where(obs_inf)[0]),
UserWarning)
else:
inf_indices_fcst = ~(np.isinf(sim_copy))
inf_indices_obs = ~np.isinf(obs_copy)
all_inf_indices = np.logical_and(inf_indices_fcst, inf_indices_obs)
all_treatment_array = np.logical_and(all_treatment_array, all_inf_indices)
warnings.warn(
"Row(s) {} contained Inf or -Inf values and the row(s) have been removed (Rows "
"are zero indexed).".format(np.where(~all_inf_indices)[0]),
UserWarning
)
# Treat zero data in observed_array and simulated_array, rows in simulated_array or
# observed_array that contain zero values
if self.remove_zero:
if (obs_copy == 0).any() or (sim_copy == 0).any():
zero_indices_fcst = ~(sim_copy == 0)
zero_indices_obs = ~(obs_copy == 0)
all_zero_indices = np.logical_and(zero_indices_fcst, zero_indices_obs)
all_treatment_array = np.logical_and(all_treatment_array, all_zero_indices)
warnings.warn(
"Row(s) {} contained zero values and the row(s) have been removed (Rows are "
"zero indexed).".format(np.where(~all_zero_indices)[0]),
UserWarning
)
# Treat negative data in observed_array and simulated_array, rows in simulated_array or
# observed_array that contain negative values
# Ignore runtime warnings from comparing
if self.remove_neg:
with np.errstate(invalid='ignore'):
obs_copy_bool = obs_copy < 0
sim_copy_bool = sim_copy < 0
if obs_copy_bool.any() or sim_copy_bool.any():
neg_indices_fcst = ~sim_copy_bool
neg_indices_obs = ~obs_copy_bool
all_neg_indices = np.logical_and(neg_indices_fcst, neg_indices_obs)
all_treatment_array = np.logical_and(all_treatment_array, all_neg_indices)
warnings.warn("Row(s) {} contained negative values and the row(s) have been "
"removed (Rows are zero indexed).".format(np.where(~all_neg_indices)[0]),
UserWarning)
self.true = obs_copy[all_treatment_array]
self.predicted = sim_copy[all_treatment_array]
return
def mse(self, weights=None) -> float:
""" mean square error """
return float(np.average((self.true - self.predicted) ** 2, axis=0, weights=weights)) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/SeqMetrics/_main.py | _main.py |
import warnings
import numpy as np
from sklearn import preprocessing
# from sklearn.metrics import hinge_loss
from sklearn.metrics import balanced_accuracy_score
import sklearn
from .utils import list_subclass_methods, msg
from ._main import Metrics
from ai4water.backend import get_attributes
CLS_METRICS = get_attributes(sklearn.metrics, '_classification', case_sensitive=True)
class ClassificationMetrics(Metrics):
"""Calculates classification metrics."""
# todo add very major erro and major error
warnings.warn(msg("ClassificationMetrics"), UserWarning)
def __init__(self, *args, multiclass=False, **kwargs):
self.multiclass = multiclass
super().__init__(*args, metric_type='classification', **kwargs)
self.true_labels = self._true_labels()
self.true_logits = self._true_logits()
self.pred_labels = self._pred_labels()
self.pred_logits = self._pred_logits()
self.all_methods = list_subclass_methods(ClassificationMetrics, True)
# self.all_methods = [m for m in all_methods if not m.startswith('_')]
def __getattr__(self, item):
def func(**kwargs): # because we want .f1_score() and not .f1_score
return CLS_METRICS[item](self.true_labels, self.pred_labels, **kwargs)
return func
@staticmethod
def _minimal() -> list:
"""some minimal and basic metrics"""
return list_subclass_methods(ClassificationMetrics, True)
@staticmethod
def _hydro_metrics() -> list:
"""some minimal and basic metrics"""
return list_subclass_methods(ClassificationMetrics, True)
def _num_classes(self):
return len(self._classes())
def _classes(self):
array = self.true_labels
return np.unique(array[~np.isnan(array)])
def _true_labels(self):
"""retuned array is 1d"""
if self.multiclass:
if self.true.size == len(self.true):
return self.true.reshape(-1,1)
            return np.argmax(self.true, axis=1)  # one-hot/probability matrix -> class labels
# it should be 1 dimensional
assert self.true.size == len(self.true)
return self.true.reshape(-1,)
def _true_logits(self):
"""returned array is 2d"""
if self.multiclass:
return self.true
lb = preprocessing.LabelBinarizer()
return lb.fit_transform(self.true)
def _pred_labels(self):
"""returns 1d"""
if self.multiclass:
if self.predicted.size == len(self.predicted):
return self.predicted.reshape(-1,1)
            return np.argmax(self.predicted, axis=1)  # one-hot/probability matrix -> class labels
lb = preprocessing.LabelBinarizer()
lb.fit(self.true_labels)
return lb.inverse_transform(self.predicted)
def _pred_logits(self):
"""returned array is 2d"""
if self.multiclass:
            return self.predicted
# we can't do it
return None
def cross_entropy(self, epsilon=1e-12):
"""
Computes cross entropy between targets (encoded as one-hot vectors)
and predictions.
Input: predictions (N, k) ndarray
targets (N, k) ndarray
Returns: scalar
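        Example (an illustrative sketch; one-hot targets and predicted class probabilities):
            >>> import numpy as np
            >>> t = np.array([[1, 0], [0, 1], [1, 0]])
            >>> p = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
            >>> ce = ClassificationMetrics(t, p, multiclass=True).cross_entropy()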
"""
predictions = np.clip(self.predicted, epsilon, 1. - epsilon)
n = predictions.shape[0]
ce = -np.sum(self.true * np.log(predictions + 1e-9)) / n
return ce
# def hinge_loss(self):
# """hinge loss using sklearn"""
# if self.pred_logits is not None:
# return hinge_loss(self.true_labels, self.pred_logits)
# return None
def balanced_accuracy_score(self):
return balanced_accuracy_score(self.true_labels, self.pred_labels)
def accuracy(self, normalize=True):
if normalize:
return np.average(self.true_labels==self.pred_labels)
return (self.true_labels==self.pred_labels).sum() | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/SeqMetrics/_cls.py | _cls.py |
import warnings
from math import sqrt
from typing import Union
from scipy.stats import gmean, kendalltau
import numpy as np
from .utils import _geometric_mean, _mean_tweedie_deviance, _foo, list_subclass_methods, msg
from ._main import Metrics, EPS
class RegressionMetrics(Metrics):
"""
Calculates more than 100 regression performance metrics related to sequence data.
Example:
    >>> import numpy as np
    >>> from ai4water.postprocessing.SeqMetrics import RegressionMetrics
    >>> t = np.random.random(10)
    >>> p = np.random.random(10)
    >>> errors = RegressionMetrics(t, p)
    >>> all_errors = errors.calculate_all()
"""
warnings.warn(msg("RegressionMetrics"), UserWarning)
def __init__(self, *args, **kwargs):
"""
Initializes `Metrics`.
args and kwargs go to parent class ['Metrics'][ai4water.postprocessing.SeqMetrics.Metrics].
"""
super().__init__(*args, **kwargs)
self.all_methods = list_subclass_methods(RegressionMetrics, True,
additional_ignores=['calculate_hydro_metrics',
# 'calculate_scale_dependent_metrics',
# 'calculate_scale_independent_metrics'
])
# if arrays contain negative values, following three errors can not be computed
for array in [self.true, self.predicted]:
assert len(array) > 0, "Input arrays should not be empty"
if len(array[array < 0.0]) > 0:
self.all_methods = [m for m in self.all_methods if m not in ('mean_gamma_deviance',
'mean_poisson_deviance',
'mean_square_log_error')]
if (array <= 0).any(): # mean tweedie error is not computable
self.all_methods = [m for m in self.all_methods if m not in ('mean_gamma_deviance',
'mean_poisson_deviance')]
def _hydro_metrics(self) -> list:
"""Names of metrics related to hydrology"""
return self._minimal() + [
'fdc_flv', 'fdc_fhv',
'kge', 'kge_np', 'kge_mod', 'kge_bound', 'kgeprime_c2m', 'kgenp_bound',
'nse', 'nse_alpha', 'nse_beta', 'nse_mod', 'nse_bound']
@staticmethod
def _scale_independent_metrics() -> list:
"""Names of scale independent metrics."""
return ['mape', 'r2', 'nse']
@staticmethod
def _scale_dependent_metrics() -> list:
"""Names of scale dependent metrics."""
return ['mse', 'rmse', 'mae']
@staticmethod
def _minimal() -> list:
"""some minimal and basic metrics"""
return ['r2', 'mape', 'nrmse', 'corr_coeff', 'rmse', 'mae', 'mse', 'mpe',
'mase', 'r2_score']
def abs_pbias(self) -> float:
"""Absolute Percent bias"""
_apb = 100.0 * sum(abs(self.predicted - self.true)) / sum(self.true) # Absolute percent bias
return float(_apb)
def acc(self) -> float:
"""Anomaly correction coefficient.
Reference:
[Langland et al., 2012](https://doi.org/10.3402/tellusa.v64i0.17531).
Miyakoda et al., 1972. Murphy et al., 1989."""
a = self.predicted - np.mean(self.predicted)
b = self.true - np.mean(self.true)
c = np.std(self.true, ddof=1) * np.std(self.predicted, ddof=1) * self.predicted.size
return float(np.dot(a, b / c))
def adjusted_r2(self) -> float:
"""Adjusted R squared."""
k = 1
n = len(self.predicted)
adj_r = 1 - ((1 - self.r2()) * (n - 1)) / (n - k - 1)
return float(adj_r)
def agreement_index(self) -> float:
"""
Agreement Index (d) developed by [Willmott, 1981](https://doi.org/10.1080/02723646.1981.10642213).
        It detects additive and proportional differences in the observed and
        simulated means and variances [Moriasi et al., 2015](https://doi.org/10.13031/trans.58.10715).
        It is overly sensitive to extreme values due to the squared
        differences [2]. It can also be used as a substitute for R2 to
        identify the degree to which model predictions
        are error-free [2].
.. math::
d = 1 - \\frac{\\sum_{i=1}^{N}(e_{i} - s_{i})^2}{\\sum_{i=1}^{N}(\\left | s_{i} - \\bar{e}
\\right | + \\left | e_{i} - \\bar{e} \\right |)^2}
        [2] Legates and McCabe, 1999
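        Example (an illustrative sketch with random arrays):
            >>> import numpy as np
            >>> t, p = np.random.random(20), np.random.random(20)
            >>> d = RegressionMetrics(t, p).agreement_index()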
"""
agreement_index = 1 - (np.sum((self.true - self.predicted) ** 2)) / (np.sum(
(np.abs(self.predicted - np.mean(self.true)) + np.abs(self.true - np.mean(self.true))) ** 2))
return float(agreement_index)
def aic(self, p=1) -> float:
"""
[Akaike’s Information Criterion](https://doi.org/10.1007/978-1-4612-1694-0_15)
Modifying from https://github.com/UBC-MDS/RegscorePy/blob/master/RegscorePy/aic.py
"""
assert p > 0
self.assert_greater_than_one # noac
n = len(self.true)
resid = np.subtract(self.predicted, self.true)
rss = np.sum(np.power(resid, 2))
return float(n * np.log(rss / n) + 2 * p)
def aitchison(self, center='mean') -> float:
"""Aitchison distance. used in [Zhang et al., 2020](https://doi.org/10.5194/hess-24-2505-2020)"""
lx = np.log(self.true)
ly = np.log(self.predicted)
if center.upper() == 'MEAN':
m = np.mean
elif center.upper() == 'MEDIAN':
m = np.median
else:
raise ValueError
clr_x = lx - m(lx)
clr_y = ly - m(ly)
d = (sum((clr_x - clr_y) ** 2)) ** 0.5
return float(d)
def amemiya_adj_r2(self) -> float:
"""Amemiya’s Adjusted R-squared"""
k = 1
n = len(self.predicted)
adj_r = 1 - ((1 - self.r2()) * (n + k)) / (n - k - 1)
return float(adj_r)
def amemiya_pred_criterion(self) -> float:
"""Amemiya’s Prediction Criterion"""
k = 1
n = len(self.predicted)
return float(((n + k) / (n - k)) * (1 / n) * self.sse())
def bias(self) -> float:
"""
Bias as shown in https://doi.org/10.1029/97WR03495 and given by
        [Gupta et al., 1998](https://doi.org/10.1080/02626667.2018.1552002)
.. math::
Bias=\\frac{1}{N}\\sum_{i=1}^{N}(e_{i}-s_{i})
"""
bias = np.nansum(self.true - self.predicted) / len(self.true)
return float(bias)
def bic(self, p=1) -> float:
"""
Bayesian Information Criterion
Minimising the BIC is intended to give the best model. The
model chosen by the BIC is either the same as that chosen by the AIC, or one
with fewer terms. This is because the BIC penalises the number of parameters
more heavily than the AIC [1].
Modified after https://github.com/UBC-MDS/RegscorePy/blob/master/RegscorePy/bic.py
[1]: https://otexts.com/fpp2/selecting-predictors.html#schwarzs-bayesian-information-criterion
"""
assert p >= 0
n = len(self.true)
return float(n * np.log(self.sse() / n) + p * np.log(n))
def brier_score(self) -> float:
"""
Adopted from https://github.com/PeterRochford/SkillMetrics/blob/master/skill_metrics/brier_score.py
Calculates the Brier score (BS), a measure of the mean-square error of
probability forecasts for a dichotomous (two-category) event, such as
the occurrence/non-occurrence of precipitation. The score is calculated
using the formula:
BS = sum_(n=1)^N (f_n - o_n)^2/N
where f is the forecast probabilities, o is the observed probabilities
(0 or 1), and N is the total number of values in f & o. Note that f & o
must have the same number of values, and those values must be in the
range [0,1].
https://data.library.virginia.edu/a-brief-on-brier-scores/
Output:
BS : Brier score
Reference:
Glenn W. Brier, 1950: Verification of forecasts expressed in terms
of probabilities. Mon. We. Rev., 78, 1-23.
D. S. Wilks, 1995: Statistical Methods in the Atmospheric Sciences.
Cambridge Press. 547 pp.
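        Example (an illustrative sketch; observed values must be 0/1 and
        forecasts must be probabilities in [0, 1]):
            >>> import numpy as np
            >>> obs = np.array([0, 1, 1, 0, 1])
            >>> fcst = np.array([0.1, 0.8, 0.6, 0.3, 0.9])
            >>> bs = RegressionMetrics(obs, fcst).brier_score()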
"""
# Check for valid values
index = np.where(np.logical_or(self.predicted < 0, self.predicted > 1))
if np.sum(index) > 0:
msg = 'Forecast has values outside interval [0,1].'
raise ValueError(msg)
index = np.where(np.logical_and(self.true != 0, self.true != 1))
if np.sum(index) > 0:
msg = 'Observed has values not equal to 0 or 1.'
raise ValueError(msg)
# Calculate score
bs = np.sum(np.square(self.predicted - self.true)) / len(self.predicted)
return bs
def corr_coeff(self) -> float:
"""
Pearson correlation coefficient.
        It measures linear correlation between true and predicted arrays.
It is sensitive to outliers.
Reference: Pearson, K 1895.
.. math::
r = \\frac{\\sum ^n _{i=1}(e_i - \\bar{e})(s_i - \\bar{s})}{\\sqrt{\\sum ^n _{i=1}(e_i - \\bar{e})^2}
\\sqrt{\\sum ^n _{i=1}(s_i - \\bar{s})^2}}
"""
correlation_coefficient = np.corrcoef(self.true, self.predicted)[0, 1]
return float(correlation_coefficient)
def covariance(self) -> float:
"""
Covariance
.. math::
Covariance = \\frac{1}{N} \\sum_{i=1}^{N}((e_{i} - \\bar{e}) * (s_{i} - \\bar{s}))
"""
obs_mean = np.mean(self.true)
sim_mean = np.mean(self.predicted)
covariance = np.mean((self.true - obs_mean) * (self.predicted - sim_mean))
return float(covariance)
def cronbach_alpha(self) -> float:
"""
        It is a measure of internal consistency of data
https://stats.idre.ucla.edu/spss/faq/what-does-cronbachs-alpha-mean/
https://stackoverflow.com/a/20799687/5982232
"""
itemscores = np.stack([self.true, self.predicted])
itemvars = itemscores.var(axis=1, ddof=1)
tscores = itemscores.sum(axis=0)
nitems = len(itemscores)
return float(nitems / (nitems - 1.) * (1 - itemvars.sum() / tscores.var(ddof=1)))
def centered_rms_dev(self) -> float:
"""
Modified after https://github.com/PeterRochford/SkillMetrics/blob/master/skill_metrics/centered_rms_dev.py
Calculates the centered root-mean-square (RMS) difference between true and predicted
using the formula:
(E')^2 = sum_(n=1)^N [(p_n - mean(p))(r_n - mean(r))]^2/N
where p is the predicted values, r is the true values, and
N is the total number of values in p & r.
Output:
CRMSDIFF : centered root-mean-square (RMS) difference (E')^2
"""
# Calculate means
pmean = np.mean(self.predicted)
rmean = np.mean(self.true)
# Calculate (E')^2
crmsd = np.square((self.predicted - pmean) - (self.true - rmean))
crmsd = np.sum(crmsd) / self.predicted.size
crmsd = np.sqrt(crmsd)
return float(crmsd)
def cosine_similarity(self) -> float:
"""[cosine similary](https://en.wikipedia.org/wiki/Cosine_similarity)
It is a judgment of orientation and not magnitude: two vectors with
the same orientation have a cosine similarity of 1, two vectors oriented
at 90° relative to each other have a similarity of 0, and two vectors diametrically
opposed have a similarity of -1, independent of their magnitude.
"""
return float(np.dot(self.true.reshape(-1,),
self.predicted.reshape(-1,)) /
(np.linalg.norm(self.true) * np.linalg.norm(self.predicted)))
def decomposed_mse(self) -> float:
"""
Decomposed MSE developed by Kobayashi and Salam (2000)
.. math ::
dMSE = (\\frac{1}{N}\\sum_{i=1}^{N}(e_{i}-s_{i}))^2 + SDSD + LCS
SDSD = (\\sigma(e) - \\sigma(s))^2
LCS = 2 \\sigma(e) \\sigma(s) * (1 - \\frac{\\sum ^n _{i=1}(e_i - \\bar{e})(s_i - \\bar{s})}
{\\sqrt{\\sum ^n _{i=1}(e_i - \\bar{e})^2} \\sqrt{\\sum ^n _{i=1}(s_i - \\bar{s})^2}})
"""
e_std = np.std(self.true)
s_std = np.std(self.predicted)
bias_squared = self.bias() ** 2
sdsd = (e_std - s_std) ** 2
lcs = 2 * e_std * s_std * (1 - self.corr_coeff())
decomposed_mse = bias_squared + sdsd + lcs
return float(decomposed_mse)
def euclid_distance(self) -> float:
"""Euclidian distance
Referneces: Kennard et al., 2010
"""
return float(np.linalg.norm(self.true - self.predicted))
def exp_var_score(self, weights=None) -> Union[float, None]:
"""
Explained variance score
https://stackoverflow.com/questions/24378176/python-sci-kit-learn-metrics-difference-between-r2-score-and-explained-varian
best value is 1, lower values are less accurate.
"""
y_diff_avg = np.average(self.true - self.predicted, weights=weights, axis=0)
numerator = np.average((self.true - self.predicted - y_diff_avg) ** 2,
weights=weights, axis=0)
y_true_avg = np.average(self.true, weights=weights, axis=0)
denominator = np.average((self.true - y_true_avg) ** 2,
weights=weights, axis=0)
if numerator == 0.0:
return None
output_scores = _foo(denominator, numerator)
return float(np.average(output_scores, weights=weights))
def expanded_uncertainty(self, cov_fact=1.96) -> float:
"""By default it calculates uncertainty with 95% confidence interval. 1.96 is the coverage factor
corresponding 95% confidence level [2]. This indicator is used in order to show more information about the
model deviation [2].
Using formula from by [1] and [2].
[1] https://doi.org/10.1016/j.enconman.2015.03.067
[2] https://doi.org/10.1016/j.rser.2014.07.117
"""
sd = np.std(self._error(self.true, self.predicted))
return float(cov_fact * np.sqrt(sd ** 2 + self.rmse() ** 2))
def fdc_fhv(self, h: float = 0.02) -> float:
"""
modified after: https://github.com/kratzert/ealstm_regional_modeling/blob/64a446e9012ecd601e0a9680246d3bbf3f002f6d/papercode/metrics.py#L190
Peak flow bias of the flow duration curve (Yilmaz 2008).
used in kratzert et al., 2018
Returns
-------
float
Bias of the peak flows
Raises
------
RuntimeError
If `h` is not in range(0,1)
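        Example (an illustrative sketch; h=0.02 considers the top 2 % of flows):
            >>> import numpy as np
            >>> t, p = np.random.random(100), np.random.random(100)
            >>> fhv = RegressionMetrics(t, p).fdc_fhv(h=0.02)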
"""
if (h <= 0) or (h >= 1):
raise RuntimeError("h has to be in the range (0,1)")
# sort both in descending order
obs = -np.sort(-self.true)
sim = -np.sort(-self.predicted)
# subset data to only top h flow values
obs = obs[:np.round(h * len(obs)).astype(int)]
sim = sim[:np.round(h * len(sim)).astype(int)]
fhv = np.sum(sim - obs) / (np.sum(obs) + 1e-6)
return float(fhv * 100)
def fdc_flv(self, low_flow: float = 0.3) -> float:
"""
bias of the bottom 30 % low flows
modified after: https://github.com/kratzert/ealstm_regional_modeling/blob/64a446e9012ecd601e0a9680246d3bbf3f002f6d/papercode/metrics.py#L237
used in kratzert et al., 2018
Parameters
----------
low_flow : float, optional
Upper limit of the flow duration curve. E.g. 0.3 means the bottom 30% of the flows are
considered as low flows, by default 0.3
Returns
-------
float
Bias of the low flows.
Raises
------
RuntimeError
If `low_flow` is not in the range(0,1)
"""
low_flow = 1.0 - low_flow
# make sure that metric is calculated over the same dimension
obs = self.true.flatten()
sim = self.predicted.flatten()
if (low_flow <= 0) or (low_flow >= 1):
raise RuntimeError("l has to be in the range (0,1)")
# for numerical reasons change 0s to 1e-6
sim[sim == 0] = 1e-6
obs[obs == 0] = 1e-6
# sort both in descending order
obs = -np.sort(-obs)
sim = -np.sort(-sim)
# subset data to only top h flow values
obs = obs[np.round(low_flow * len(obs)).astype(int):]
sim = sim[np.round(low_flow * len(sim)).astype(int):]
# transform values to log scale
obs = np.log(obs + 1e-6)
sim = np.log(sim + 1e-6)
# calculate flv part by part
qsl = np.sum(sim - sim.min())
qol = np.sum(obs - obs.min())
flv = -1 * (qsl - qol) / (qol + 1e-6)
return float(flv * 100)
def gmae(self) -> float:
""" Geometric Mean Absolute Error """
return _geometric_mean(np.abs(self._error()))
def gmean_diff(self) -> float:
"""Geometric mean difference. First geometric mean is calculated for each of two samples and their difference
is calculated."""
sim_log = np.log1p(self.predicted)
obs_log = np.log1p(self.true)
return float(np.exp(gmean(sim_log) - gmean(obs_log)))
def gmrae(self, benchmark: np.ndarray = None) -> float:
""" Geometric Mean Relative Absolute Error """
return _geometric_mean(np.abs(self._relative_error(benchmark)))
def calculate_hydro_metrics(self):
"""
Calculates all metrics for hydrological data.
Returns
-------
dict
Dictionary with all metrics
"""
metrics = {}
for metric in self._hydro_metrics():
metrics[metric] = getattr(self, metric)()
return metrics
def inrse(self) -> float:
""" Integral Normalized Root Squared Error """
return float(np.sqrt(np.sum(np.square(self._error())) / np.sum(np.square(self.true - np.mean(self.true)))))
def irmse(self) -> float:
"""Inertial RMSE. RMSE divided by standard deviation of the gradient of true."""
# Getting the gradient of the observed data
obs_len = self.true.size
obs_grad = self.true[1:obs_len] - self.true[0:obs_len - 1]
# Standard deviation of the gradient
obs_grad_std = np.std(obs_grad, ddof=1)
# Divide RMSE by the standard deviation of the gradient of the observed data
return float(self.rmse() / obs_grad_std)
def JS(self) -> float:
"""Jensen-shannon divergence"""
warnings.filterwarnings("ignore", category=RuntimeWarning)
d1 = self.true * np.log2(2 * self.true / (self.true + self.predicted))
d2 = self.predicted * np.log2(2 * self.predicted / (self.true + self.predicted))
d1[np.isnan(d1)] = 0
d2[np.isnan(d2)] = 0
d = 0.5 * sum(d1 + d2)
return float(d)
def kendaull_tau(self, return_p=False) -> Union[float, tuple]:
"""Kendall's tau
https://machinelearningmastery.com/how-to-calculate-nonparametric-rank-correlation-in-python/
used in https://www.jmlr.org/papers/volume20/18-444/18-444.pdf
"""
coef, p = kendalltau(self.true, self.predicted)
if return_p:
return coef, p
        return float(coef)  # return the correlation (tau) itself, not the p-value
def kge(self, return_all=False):
"""
Kling-Gupta Efficiency
Gupta, Kling, Yilmaz, Martinez, 2009, Decomposition of the mean squared error and NSE performance
criteria: Implications for improving hydrological modelling
output:
kge: Kling-Gupta Efficiency
cc: correlation
alpha: ratio of the standard deviation
beta: ratio of the mean
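        Example (an illustrative sketch with random arrays):
            >>> import numpy as np
            >>> t, p = np.random.random(20), np.random.random(20)
            >>> k = RegressionMetrics(t, p).kge()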
"""
cc = np.corrcoef(self.true, self.predicted)[0, 1]
alpha = np.std(self.predicted) / np.std(self.true)
beta = np.sum(self.predicted) / np.sum(self.true)
return post_process_kge(cc, alpha, beta, return_all)
def kge_bound(self) -> float:
"""
Bounded Version of the Original Kling-Gupta Efficiency
https://iahs.info/uploads/dms/13614.21--211-219-41-MATHEVET.pdf
"""
kge_ = self.kge(return_all=True)[0, :]
kge_c2m_ = kge_ / (2 - kge_)
return float(kge_c2m_)
def kge_mod(self, return_all=False):
"""
Modified Kling-Gupta Efficiency (Kling et al. 2012 - https://doi.org/10.1016/j.jhydrol.2012.01.011)
"""
# calculate error in timing and dynamics r (Pearson's correlation coefficient)
sim_mean = np.mean(self.predicted, axis=0, dtype=np.float64)
obs_mean = np.mean(self.true, dtype=np.float64)
r = np.sum((self.predicted - sim_mean) * (self.true - obs_mean), axis=0, dtype=np.float64) / \
np.sqrt(np.sum((self.predicted - sim_mean) ** 2, axis=0, dtype=np.float64) *
np.sum((self.true - obs_mean) ** 2, dtype=np.float64))
# calculate error in spread of flow gamma (avoiding cross correlation with bias by dividing by the mean)
gamma = (np.std(self.predicted, axis=0, dtype=np.float64) / sim_mean) / \
(np.std(self.true, dtype=np.float64) / obs_mean)
# calculate error in volume beta (bias of mean discharge)
beta = np.mean(self.predicted, axis=0, dtype=np.float64) / np.mean(self.true, axis=0, dtype=np.float64)
# calculate the modified Kling-Gupta Efficiency KGE'
return post_process_kge(r, gamma, beta, return_all)
def kge_np(self, return_all=False):
"""
Non parametric Kling-Gupta Efficiency
Corresponding paper:
Pool, Vis, and Seibert, 2018 Evaluating model performance: towards a non-parametric variant of the
Kling-Gupta efficiency, Hydrological Sciences Journal.
https://doi.org/10.1080/02626667.2018.1552002
output:
kge: Kling-Gupta Efficiency
cc: correlation
alpha: ratio of the standard deviation
beta: ratio of the mean
"""
# # self-made formula
cc = self.spearmann_corr()
fdc_sim = np.sort(self.predicted / (np.nanmean(self.predicted) * len(self.predicted)))
fdc_obs = np.sort(self.true / (np.nanmean(self.true) * len(self.true)))
alpha = 1 - 0.5 * np.nanmean(np.abs(fdc_sim - fdc_obs))
beta = np.mean(self.predicted) / np.mean(self.true)
return post_process_kge(cc, alpha, beta, return_all)
def kgeprime_c2m(self) -> float:
"""
https://iahs.info/uploads/dms/13614.21--211-219-41-MATHEVET.pdf
Bounded Version of the Modified Kling-Gupta Efficiency
"""
kgeprime_ = self.kge_mod(return_all=True)[0, :]
kgeprime_c2m_ = kgeprime_ / (2 - kgeprime_)
return float(kgeprime_c2m_)
def kgenp_bound(self):
"""
Bounded Version of the Non-Parametric Kling-Gupta Efficiency
"""
kgenp_ = self.kge_np(return_all=True)[0, :]
kgenp_c2m_ = kgenp_ / (2 - kgenp_)
return float(kgenp_c2m_)
def kl_sym(self) -> Union[float, None]:
"""Symmetric kullback-leibler divergence"""
if not all((self.true == 0) == (self.predicted == 0)):
return None # ('KL divergence not defined when only one distribution is 0.')
x, y = self.true, self.predicted
# set values where both distributions are 0 to the same (positive) value.
# This will not contribute to the final distance.
x[x == 0] = 1
y[y == 0] = 1
d = 0.5 * np.sum((x - y) * (np.log2(x) - np.log2(y)))
return float(d)
def lm_index(self, obs_bar_p=None) -> float:
"""Legate-McCabe Efficiency Index.
Less sensitive to outliers in the data.
obs_bar_p: float, Seasonal or other selected average. If None, the mean of the observed array will be used.
"""
mean_obs = np.mean(self.true)
a = np.abs(self.predicted - self.true)
if obs_bar_p is not None:
b = np.abs(self.true - obs_bar_p)
else:
b = np.abs(self.true - mean_obs)
return float(1 - (np.sum(a) / np.sum(b)))
def maape(self) -> float:
"""
Mean Arctangent Absolute Percentage Error
Note: result is NOT multiplied by 100
"""
return float(np.mean(np.arctan(np.abs((self.true - self.predicted) / (self.true + EPS)))))
def mae(self, true=None, predicted=None) -> float:
""" Mean Absolute Error """
if true is None:
true = self.true
if predicted is None:
predicted = self.predicted
return float(np.mean(np.abs(true - predicted)))
def mape(self) -> float:
""" Mean Absolute Percentage Error.
The MAPE is often used when the quantity to predict is known to remain way above zero [1]. It is useful when
        the size or magnitude of a prediction variable is significant in evaluating the accuracy of a prediction [2]. It has
advantages of scale-independency and interpretability [3]. However, it has the significant disadvantage that it
produces infinite or undefined values for zero or close-to-zero actual values [3].
[1] https://doi.org/10.1016/j.neucom.2015.12.114
[2] https://doi.org/10.1088/1742-6596/930/1/012002
[3] https://doi.org/10.1016/j.ijforecast.2015.12.003
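        Example (an illustrative sketch; note that true values must stay away from zero):
            >>> import numpy as np
            >>> t = np.array([10.0, 20.0, 30.0])
            >>> p = np.array([11.0, 18.0, 33.0])
            >>> m = RegressionMetrics(t, p).mape()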
"""
return float(np.mean(np.abs((self.true - self.predicted) / self.true)) * 100)
def mbe(self) -> float:
"""Mean bias error. This indicator expresses a tendency of model to underestimate (negative value)
or overestimate (positive value) global radiation, while the MBE values closest to zero are desirable.
The drawback of this test is that it does not show the correct performance when the model presents
overestimated and underestimated values at the same time, since overestimation and underestimation
values cancel each other. [1]
[1] https://doi.org/10.1016/j.rser.2015.08.035
"""
return float(np.mean(self._error(self.true, self.predicted)))
def mbrae(self, benchmark: np.ndarray = None) -> float:
""" Mean Bounded Relative Absolute Error """
return float(np.mean(self._bounded_relative_error(benchmark)))
def mapd(self) -> float:
"""Mean absolute percentage deviation."""
a = np.sum(np.abs(self.predicted - self.true))
b = np.sum(np.abs(self.true))
return float(a / b)
def mase(self, seasonality: int = 1):
"""
Mean Absolute Scaled Error
Baseline (benchmark) is computed with naive forecasting (shifted by @seasonality)
modified after https://gist.github.com/bshishov/5dc237f59f019b26145648e2124ca1c9
Hyndman, R. J. (2006). Another look at forecast-accuracy metrics for intermittent demand.
Foresight: The International Journal of Applied Forecasting, 4(4), 43-46.
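        Example (an illustrative sketch; the default seasonality of 1 means a one-step naive benchmark):
            >>> import numpy as np
            >>> t, p = np.random.random(30), np.random.random(30)
            >>> m = RegressionMetrics(t, p).mase()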
"""
return self.mae() / self.mae(self.true[seasonality:], self._naive_prognose(seasonality))
def mare(self) -> float:
""" Mean Absolute Relative Error. When expressed in %age, it is also known as mape. [1]
https://doi.org/10.1016/j.rser.2015.08.035
"""
return float(np.mean(np.abs(self._error(self.true, self.predicted) / self.true)))
def max_error(self) -> float:
"""
maximum error
"""
return float(np.max(self._ae()))
def mb_r(self) -> float:
"""Mielke-Berry R value.
Berry and Mielke, 1988.
Mielke, P. W., & Berry, K. J. (2007). Permutation methods: a distance function approach.
Springer Science & Business Media.
"""
# Calculate metric
n = self.predicted.size
tot = 0.0
for i in range(n):
tot = tot + np.sum(np.abs(self.predicted - self.true[i]))
mae_val = np.sum(np.abs(self.predicted - self.true)) / n
mb = 1 - ((n ** 2) * mae_val / tot)
return float(mb)
def mda(self) -> float:
""" Mean Directional Accuracy
modified after https://gist.github.com/bshishov/5dc237f59f019b26145648e2124ca1c9
"""
dict_acc = np.sign(self.true[1:] - self.true[:-1]) == np.sign(self.predicted[1:] - self.predicted[:-1])
return float(np.mean(dict_acc))
def mde(self) -> float:
"""Median Error"""
return float(np.median(self.predicted - self.true))
def mdape(self) -> float:
"""
Median Absolute Percentage Error
"""
return float(np.median(np.abs(self._percentage_error())) * 100)
def mdrae(self, benchmark: np.ndarray = None) -> float:
""" Median Relative Absolute Error """
return float(np.median(np.abs(self._relative_error(benchmark))))
def me(self):
"""Mean error """
return float(np.mean(self._error()))
def mean_bias_error(self) -> float:
"""
Mean Bias Error
It represents overall bias error or systematic error. It shows average interpolation bias; i.e. average over-
or underestimation. [1][2].This indicator expresses a tendency of model to underestimate (negative value)
or overestimate (positive value) global radiation, while the MBE values closest to zero are desirable.
The drawback of this test is that it does not show the correct performance when the model presents
overestimated and underestimated values at the same time, since overestimation and underestimation
values cancel each other.
[2] Willmott, C. J., & Matsuura, K. (2006). On the use of dimensioned measures of error to evaluate the performance
of spatial interpolators. International Journal of Geographical Information Science, 20(1), 89-102.
https://doi.org/10.1080/1365881050028697
[1] Valipour, M. (2015). Retracted: Comparative Evaluation of Radiation-Based Methods for Estimation of Potential
Evapotranspiration. Journal of Hydrologic Engineering, 20(5), 04014068.
http://dx.doi.org/10.1061/(ASCE)HE.1943-5584.0001066
[3] https://doi.org/10.1016/j.rser.2015.08.035
"""
return float(np.sum(self.true - self.predicted) / len(self.true))
def mean_var(self) -> float:
"""Mean variance"""
return float(np.var(np.log1p(self.true) - np.log1p(self.predicted)))
def mean_poisson_deviance(self, weights=None) -> float:
"""
mean poisson deviance
"""
return _mean_tweedie_deviance(self.true, self.predicted, weights=weights, power=1)
def mean_gamma_deviance(self, weights=None) -> float:
"""
mean gamma deviance
"""
return _mean_tweedie_deviance(self.true, self.predicted, weights=weights, power=2)
def median_abs_error(self) -> float:
"""
median absolute error
"""
return float(np.median(np.abs(self.predicted - self.true), axis=0))
def med_seq_error(self) -> float:
"""Median Squared Error
Same as mse but it takes median which reduces the impact of outliers.
"""
return float(np.median((self.predicted - self.true) ** 2))
def mle(self) -> float:
"""Mean log error"""
return float(np.mean(np.log1p(self.predicted) - np.log1p(self.true)))
def mod_agreement_index(self, j=1) -> float:
"""Modified agreement of index.
j: int, when j==1, this is same as agreement_index. Higher j means more impact of outliers."""
a = (np.abs(self.predicted - self.true)) ** j
b = np.abs(self.predicted - np.mean(self.true))
c = np.abs(self.true - np.mean(self.true))
e = (b + c) ** j
return float(1 - (np.sum(a) / np.sum(e)))
def mpe(self) -> float:
""" Mean Percentage Error """
return float(np.mean(self._percentage_error()))
def mrae(self, benchmark: np.ndarray = None):
""" Mean Relative Absolute Error """
return float(np.mean(np.abs(self._relative_error(benchmark))))
def msle(self, weights=None) -> float:
"""
mean square logrithmic error
"""
return float(np.average((np.log1p(self.true) - np.log1p(self.predicted)) ** 2, axis=0, weights=weights))
def norm_euclid_distance(self) -> float:
"""Normalized Euclidian distance"""
a = self.true / np.mean(self.true)
b = self.predicted / np.mean(self.predicted)
return float(np.linalg.norm(a - b))
def nrmse_range(self) -> float:
"""Range Normalized Root Mean Squared Error.
RMSE normalized by true values. This allows comparison between data sets with different scales. It is more
sensitive to outliers.
Reference: Pontius et al., 2008
"""
return float(self.rmse() / (np.max(self.true) - np.min(self.true)))
def nrmse_ipercentile(self, q1=25, q2=75) -> float:
"""
RMSE normalized by inter percentile range of true. This is least sensitive to outliers.
q1: any interger between 1 and 99
q2: any integer between 2 and 100. Should be greater than q1.
Reference: Pontius et al., 2008.
"""
q1 = np.percentile(self.true, q1)
q3 = np.percentile(self.true, q2)
iqr = q3 - q1
return float(self.rmse() / iqr)
def nrmse_mean(self) -> float:
"""Mean Normalized RMSE
RMSE normalized by mean of true values.This allows comparison between datasets with different scales.
Reference: Pontius et al., 2008
"""
return float(self.rmse() / np.mean(self.true))
def norm_ae(self) -> float:
""" Normalized Absolute Error """
return float(np.sqrt(np.sum(np.square(self._error() - self.mae())) / (len(self.true) - 1)))
def norm_ape(self) -> float:
""" Normalized Absolute Percentage Error """
return float(np.sqrt(np.sum(np.square(self._percentage_error() - self.mape())) / (len(self.true) - 1)))
def nrmse(self) -> float:
""" Normalized Root Mean Squared Error """
return float(self.rmse() / (np.max(self.true) - np.min(self.true)))
def nse(self) -> float:
"""Nash-Sutcliff Efficiency.
        It determines how well the model simulates trends for the output response of concern. But it cannot help identify
model bias and cannot be used to identify differences in timing and magnitude of peak flows and shape of
recession curves; in other words, it cannot be used for single-event simulations. It is sensitive to extreme
        values due to the squared differences [1]. To make it less sensitive to outliers, [2] proposed
log and relative nse.
[1] Moriasi, D. N., Gitau, M. W., Pai, N., & Daggupati, P. (2015). Hydrologic and water quality models:
Performance measures and evaluation criteria. Transactions of the ASABE, 58(6), 1763-1785.
[2] Krause, P., Boyle, D., & Bäse, F. (2005). Comparison of different efficiency criteria for hydrological
model assessment. Adv. Geosci., 5, 89-97. http://dx.doi.org/10.5194/adgeo-5-89-2005.
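        Example (an illustrative sketch; 1.0 is a perfect score and values can become negative):
            >>> import numpy as np
            >>> t, p = np.random.random(20), np.random.random(20)
            >>> e = RegressionMetrics(t, p).nse()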
"""
_nse = 1 - sum((self.predicted - self.true) ** 2) / sum((self.true - np.mean(self.true)) ** 2)
return float(_nse)
def nse_alpha(self) -> float:
"""
Alpha decomposition of the NSE, see [Gupta et al. 2009](https://doi.org/10.1029/97WR03495)
used in kratzert et al., 2018
Returns
-------
float
Alpha decomposition of the NSE
"""
return float(np.std(self.predicted) / np.std(self.true))
def nse_beta(self) -> float:
"""
Beta decomposition of NSE. See [Gupta et. al 2009](https://doi.org/10.1016/j.jhydrol.2009.08.003)
used in kratzert et al., 2018
Returns
-------
float
Beta decomposition of the NSE
"""
return float((np.mean(self.predicted) - np.mean(self.true)) / np.std(self.true))
def nse_mod(self, j=1) -> float:
"""
        Gives less weightage to outliers if j=1 and, if j>1, gives more weightage to outliers.
Reference: Krause et al., 2005
"""
a = (np.abs(self.predicted - self.true)) ** j
b = (np.abs(self.true - np.mean(self.true))) ** j
return float(1 - (np.sum(a) / np.sum(b)))
def nse_rel(self) -> float:
"""
Relative NSE.
"""
a = (np.abs((self.predicted - self.true) / self.true)) ** 2
b = (np.abs((self.true - np.mean(self.true)) / np.mean(self.true))) ** 2
return float(1 - (np.sum(a) / np.sum(b)))
def nse_bound(self) -> float:
"""
Bounded Version of the Nash-Sutcliffe Efficiency
https://iahs.info/uploads/dms/13614.21--211-219-41-MATHEVET.pdf
"""
nse_ = self.nse()
nse_c2m_ = nse_ / (2 - nse_)
return nse_c2m_
def log_nse(self, epsilon=0.0) -> float:
"""
log Nash-Sutcliffe model efficiency
.. math::
NSE = 1-\\frac{\\sum_{i=1}^{N}(log(e_{i})-log(s_{i}))^2}{\\sum_{i=1}^{N}(log(e_{i})-log(\\bar{e})^2}-1)*-1
"""
        s, o = self.predicted + epsilon, self.true + epsilon
        return float(1 - sum((np.log(o) - np.log(s)) ** 2) / sum((np.log(o) - np.mean(np.log(o))) ** 2))
def log_prob(self) -> float:
"""
Logarithmic probability distribution
"""
scale = np.mean(self.true) / 10
if scale < .01:
scale = .01
y = (self.true - self.predicted) / scale
normpdf = -y ** 2 / 2 - np.log(np.sqrt(2 * np.pi))
return float(np.mean(normpdf))
def pbias(self) -> float:
"""
Percent Bias.
        It determines how well the model simulates the average magnitudes for the
output response of interest. It can also determine over and under-prediction.
        It cannot be used (1) for single-event simulations to identify differences
in timing and magnitude of peak flows and the shape of recession curves nor (2)
to determine how well the model simulates residual variations and/or trends
for the output response of interest. It can give a deceiving rating of
model performance if the model overpredicts as much as it underpredicts,
in which case PBIAS will be close to zero even though the model simulation
is poor. [1]
[1] Moriasi et al., 2015
"""
return float(100.0 * sum(self.predicted - self.true) / sum(self.true))
def rmsle(self) -> float:
"""Root mean square log error.
This error is less sensitive to [outliers](https://stats.stackexchange.com/q/56658/314919).
Compared to RMSE, RMSLE only considers the relative error between predicted
and actual values, and the scale of the error is nullified by the log-transformation.
Furthermore, RMSLE penalizes underestimation more than overestimation.
This is especially useful in those studies where the underestimation
of the target variable is not acceptable but overestimation can be tolerated. [1]
[1] https://doi.org/10.1016/j.scitotenv.2020.137894
"""
return float(np.sqrt(np.mean(np.power(np.log1p(self.predicted) - np.log1p(self.true), 2))))
def rmdspe(self) -> float:
"""
Root Median Squared Percentage Error
"""
return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)
def rse(self) -> float:
"""Relative Squared Error"""
return float(np.sum(np.square(self.true - self.predicted)) / np.sum(np.square(self.true - np.mean(self.true))))
def rrse(self) -> float:
""" Root Relative Squared Error """
return float(np.sqrt(self.rse()))
def rae(self) -> float:
""" Relative Absolute Error (aka Approximation Error) """
return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))
def ref_agreement_index(self) -> float:
"""Refined Index of Agreement. From -1 to 1. Larger the better.
Refrence: Willmott et al., 2012"""
a = np.sum(np.abs(self.predicted - self.true))
b = 2 * np.sum(np.abs(self.true - self.true.mean()))
if a <= b:
return float(1 - (a / b))
else:
return float((b / a) - 1)
def rel_agreement_index(self) -> float:
"""Relative index of agreement. from 0 to 1. larger the better."""
a = ((self.predicted - self.true) / self.true) ** 2
b = np.abs(self.predicted - np.mean(self.true))
c = np.abs(self.true - np.mean(self.true))
e = ((b + c) / np.mean(self.true)) ** 2
return float(1 - (np.sum(a) / np.sum(e)))
def rmse(self, weights=None) -> float:
""" root mean square error"""
return sqrt(np.average((self.true - self.predicted) ** 2, axis=0, weights=weights))
def r2(self) -> float:
"""
Quantifies the percent of variation in the response that the 'model'
explains. The 'model' here is anything from which we obtained predicted
array. It is also called coefficient of determination or square of pearson
        correlation coefficient. It is more heavily affected by outliers than the Pearson correlation r.
https://data.library.virginia.edu/is-r-squared-useless/
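        Example (an illustrative sketch with random arrays):
            >>> import numpy as np
            >>> t, p = np.random.random(20), np.random.random(20)
            >>> r_squared = RegressionMetrics(t, p).r2()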
"""
zx = (self.true - np.mean(self.true)) / np.std(self.true, ddof=1)
zy = (self.predicted - np.mean(self.predicted)) / np.std(self.predicted, ddof=1)
r = np.sum(zx * zy) / (len(self.true) - 1)
return float(r ** 2)
def r2_score(self, weights=None):
"""
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
This metric is not well-defined for single samples and will return a NaN
value if n_samples is less than two.
"""
if len(self.predicted) < 2:
msg = "R^2 score is not well-defined with less than two samples."
warnings.warn(msg)
return None
if weights is None:
weight = 1.
else:
weight = weights[:, np.newaxis]
numerator = (weight * (self.true - self.predicted) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (self.true - np.average(
self.true, axis=0, weights=weights)) ** 2).sum(axis=0, dtype=np.float64)
if numerator == 0.0:
return None
output_scores = _foo(denominator, numerator)
return float(np.average(output_scores, weights=weights))
def relative_rmse(self) -> float:
"""
Relative Root Mean Squared Error
.. math::
RRMSE=\\frac{\\sqrt{\\frac{1}{N}\\sum_{i=1}^{N}(e_{i}-s_{i})^2}}{\\bar{e}}
"""
rrmse = self.rmse() / np.mean(self.true)
return float(rrmse)
def rmspe(self) -> float:
"""
Root Mean Square Percentage Error
https://stackoverflow.com/a/53166790/5982232
"""
return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))
def rsr(self) -> float:
"""
Moriasi et al., 2007.
        It incorporates the benefits of error index statistics and includes a
scaling/normalization factor, so that the resulting statistic and reported
        values can apply to various constituents."""
return float(self.rmse() / np.std(self.true))
def rmsse(self, seasonality: int = 1) -> float:
""" Root Mean Squared Scaled Error """
q = np.abs(self._error()) / self.mae(self.true[seasonality:], self._naive_prognose(seasonality))
return float(np.sqrt(np.mean(np.square(q))))
def sa(self) -> float:
"""Spectral angle. From -pi/2 to pi/2. Closer to 0 is better.
It measures angle between two vectors in hyperspace indicating
how well the shape of two arrays match instead of their magnitude.
Reference: Robila and Gershman, 2005."""
a = np.dot(self.predicted, self.true)
b = np.linalg.norm(self.predicted) * np.linalg.norm(self.true)
return float(np.arccos(a / b))
def sc(self) -> float:
"""Spectral correlation.
From -pi/2 to pi/2. Closer to 0 is better.
"""
a = np.dot(self.true - np.mean(self.true), self.predicted - np.mean(self.predicted))
b = np.linalg.norm(self.true - np.mean(self.true))
c = np.linalg.norm(self.predicted - np.mean(self.predicted))
e = b * c
return float(np.arccos(a / e))
def sga(self) -> float:
"""Spectral gradient angle.
From -pi/2 to pi/2. Closer to 0 is better.
"""
sgx = self.true[1:] - self.true[:self.true.size - 1]
sgy = self.predicted[1:] - self.predicted[:self.predicted.size - 1]
a = np.dot(sgx, sgy)
b = np.linalg.norm(sgx) * np.linalg.norm(sgy)
return float(np.arccos(a / b))
def smape(self) -> float:
"""
Symmetric Mean Absolute Percentage Error
https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error
https://stackoverflow.com/a/51440114/5982232
"""
_temp = np.sum(2 * np.abs(self.predicted - self.true) / (np.abs(self.true) + np.abs(self.predicted)))
return float(100 / len(self.true) * _temp)
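# Worked example for smape(): with true = [100, 200] and predicted = [110, 180]
#   2*|110-100|/(100+110) = 20/210 and 2*|180-200|/(200+180) = 40/380
#   smape = 100/2 * (20/210 + 40/380) ≈ 10.03 (in percent)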
def smdape(self) -> float:
"""
Symmetric Median Absolute Percentage Error
Note: result is NOT multiplied by 100
"""
return float(np.median(2.0 * self._ae() / ((np.abs(self.true) + np.abs(self.predicted)) + EPS)))
def sid(self) -> float:
"""Spectral Information Divergence.
From -pi/2 to pi/2. Closer to 0 is better. """
first = (self.true / np.mean(self.true)) - (
self.predicted / np.mean(self.predicted))
second1 = np.log10(self.true) - np.log10(np.mean(self.true))
second2 = np.log10(self.predicted) - np.log10(np.mean(self.predicted))
return float(np.dot(first, second1 - second2))
def skill_score_murphy(self) -> float:
"""
Adopted from https://github.com/PeterRochford/SkillMetrics/blob/278b2f58c7d73566f25f10c9c16a15dc204f5869/skill_metrics/skill_score_murphy.py
Calculate non-dimensional skill score (SS) between two variables using
definition of Murphy (1988) using the formula:
SS = 1 - RMSE^2/SDEV^2
SDEV is the standard deviation of the true values
SDEV^2 = sum_(n=1)^N [r_n - mean(r)]^2/(N-1)
where p is the predicted values, r is the reference values, and N is the total number of values in p & r.
Note that p & r must have the same number of values. A positive skill score can be interpreted as the percentage
of improvement of the new model forecast in comparison to the reference. On the other hand, a negative skill
score denotes that the forecast of interest is worse than the referencing forecast. Consequently, a value of
zero denotes that both forecasts perform equally [MLAir, 2020].
Output:
SS : skill score
Reference:
Allan H. Murphy, 1988: Skill Scores Based on the Mean Square Error
and Their Relationships to the Correlation Coefficient. Mon. Wea.
Rev., 116, 2417-2424.
doi: http://dx.doi.org/10.1175/1520-0493(1988)<2417:SSBOTM>2.0.CO;2
"""
# Calculate RMSE
rmse2 = self.rmse() ** 2
# Calculate standard deviation
sdev2 = np.std(self.true, ddof=1) ** 2
# Calculate skill score
ss = 1 - rmse2 / sdev2
return float(ss)
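# Worked example for skill_score_murphy(): if rmse() = 2.0 and the sample
# standard deviation of the observations is 4.0, then
#   SS = 1 - 2.0**2 / 4.0**2 = 1 - 0.25 = 0.75
# i.e. the squared forecast error is 25% of the observed variance.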
def spearmann_corr(self) -> float:
"""Separmann correlation coefficient.
This is a nonparametric metric and assesses how well the relationship
between the true and predicted data can be described using a monotonic
function.
https://hess.copernicus.org/articles/24/2505/2020/hess-24-2505-2020.pdf
"""
# todo, is this spearman rank correlation?
col = [list(a) for a in zip(self.true, self.predicted)]
xy = sorted(col, key=lambda _x: _x[0], reverse=False)
# rank of x-value
for i, row in enumerate(xy):
row.append(i + 1)
a = sorted(xy, key=lambda _x: _x[1], reverse=False)
# rank of y-value
for i, row in enumerate(a):
row.append(i + 1)
mw_rank_x = np.nanmean(np.array(a)[:, 2])
mw_rank_y = np.nanmean(np.array(a)[:, 3])
numerator = np.nansum([float((a[j][2] - mw_rank_x) * (a[j][3] - mw_rank_y)) for j in range(len(a))])
denominator1 = np.sqrt(np.nansum([(a[j][2] - mw_rank_x) ** 2. for j in range(len(a))]))
denominator2 = np.sqrt(np.nansum([(a[j][3] - mw_rank_y) ** 2. for j in range(len(a))]))
return float(numerator / (denominator1 * denominator2))
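# Note: for tie-free data the rank correlation computed above should agree
# with scipy.stats.spearmanr(true, predicted)[0]; with ties the results can
# differ, because scipy assigns average ranks while the loop above assigns
# ordinal ranks.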
def sse(self) -> float:
"""Sum of squared errors (model vs actual).
measure of how far off our model's predictions are from the observed values. A value of 0 indicates that all
predictions are spot on. A non-zero value indicates errors.
https://dziganto.github.io/data%20science/linear%20regression/machine%20learning/python/Linear-Regression-101-Metrics/
This is also called residual sum of squares (RSS) or sum of squared residuals as per
https://www.tutorialspoint.com/statistics/residual_sum_of_squares.htm
"""
squared_errors = (self.true - self.predicted) ** 2
return float(np.sum(squared_errors))
def std_ratio(self, **kwargs) -> float:
"""ratio of standard deviations of predictions and trues.
Also known as standard ratio, it varies from 0.0 to infinity while
1.0 being the perfect value.
"""
return float(np.std(self.predicted, **kwargs) / np.std(self.true, **kwargs))
def umbrae(self, benchmark: np.ndarray = None):
""" Unscaled Mean Bounded Relative Absolute Error """
return self.mbrae(benchmark) / (1 - self.mbrae(benchmark))
def ve(self) -> float:
"""
Volumetric efficiency. From 0 to 1. Larger the better.
Reference: Criss and Winston 2008.
"""
a = np.sum(np.abs(self.predicted - self.true))
b = np.sum(self.true)
return float(1 - (a / b))
def volume_error(self) -> float:
"""
Returns the Volume Error (Ve).
It is an indicator of the agreement between the averages of the simulated
and observed runoff (i.e. long-term water balance).
used in this paper:
Reynolds, J.E., S. Halldin, C.Y. Xu, J. Seibert, and A. Kauffeldt. 2017.
"Sub-Daily Runoff Predictions Using Parameters Calibrated on the Basis of Data with a
Daily Temporal Resolution." Journal of Hydrology 550 (July): 399-411.
https://doi.org/10.1016/j.jhydrol.2017.05.012.
.. math::
    VE = \\frac{\\sum_{i=1}^{N}(S_{i}-O_{i})}{\\sum_{i=1}^{N}O_{i}}
where S is the simulated (predicted) and O the observed (true) series.
"""
ve = np.sum(self.predicted - self.true) / np.sum(self.true)
return float(ve)
def wape(self) -> float:
"""
[weighted absolute percentage error](https://mattdyor.wordpress.com/2018/05/23/calculating-wape/)
It is a variation of mape but [more suitable for intermittent and low-volume
data](https://arxiv.org/pdf/2103.12057v1.pdf).
"""
return float(np.sum(self._ae() / np.sum(self.true)))
def watt_m(self) -> float:
"""Watterson's M.
Reference: Watterson, 1996"""
a = 2 / np.pi
c = np.std(self.true, ddof=1) ** 2 + np.std(self.predicted, ddof=1) ** 2
e = (np.mean(self.predicted) - np.mean(self.true)) ** 2
f = c + e
return float(a * np.arcsin(1 - (self.mse() / f)))
def wmape(self) -> float:
"""
Weighted Mean Absolute Percent Error
https://stackoverflow.com/a/54833202/5982232
"""
# Take a series (actual) and a dataframe (forecast) and calculate wmape
# for each forecast. Output shape is (1, num_forecasts)
# Make an array of mape (same shape as forecast)
se_mape = abs(self.true - self.predicted) / self.true
# Calculate sum of actual values
ft_actual_sum = self.true.sum(axis=0)
# Multiply the actual values by the mape
se_actual_prod_mape = self.true * se_mape
# Take the sum of the product of actual values and mape
# Make sure to sum down the rows (1 for each column)
ft_actual_prod_mape_sum = se_actual_prod_mape.sum(axis=0)
# Calculate the wmape for each forecast and return as a dictionary
ft_wmape_forecast = ft_actual_prod_mape_sum / ft_actual_sum
return float(ft_wmape_forecast)
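# Worked example for wmape(): weighting each percentage error by the actual
# value collapses to sum(|true - predicted|) / sum(true). For
#   true = [10, 100], predicted = [15, 90]
#   wmape = (5 + 10) / 110 ≈ 0.136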
def post_process_kge(cc, alpha, beta, return_all=False):
kge = float(1 - np.sqrt((cc - 1) ** 2 + (alpha - 1) ** 2 + (beta - 1) ** 2))
if return_all:
return np.vstack((kge, cc, alpha, beta))
else:
return kge

# ---- end of file: ai4water/postprocessing/SeqMetrics/_rgr.py ----
import os
import itertools
from types import FunctionType
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import xlogy
try:
import plotly.graph_objects as go
except ModuleNotFoundError:
go = None
def take(st, en, d):
keys = list(d.keys())[st:en]
values = list(d.values())[st:en]
return {k: v for k, v in zip(keys, values)}
def plot_metrics(
metrics: dict,
ranges: tuple = ((0.0, 1.0), (1.0, 10), (10, 1000)),
exclude: list = None,
plot_type: str = 'bar',
max_metrics_per_fig: int = 15,
show: bool = True,
save: bool = False,
save_path: str = None,
**kwargs):
"""
Plots the metrics given as dictionary as radial or bar plot between specified ranges.
Arguments:
metrics:
dictionary whose keys are names of errors and values are error values.
ranges:
tuple of tuples defining range of errors to plot in one plot
exclude:
List of metrics to be excluded from plotting.
max_metrics_per_fig:
maximum number of metrics to show in one figure.
plot_type:
either of `radial` or `bar`.
show : If True, the figure will be shown/drawn
save:
if True, the figure will be saved.
save_path:
if given, the figure will be saved at this location.
kwargs:
keyword arguments for plotting
Examples:
>>> import numpy as np
>>> from ai4water.postprocessing.SeqMetrics import RegressionMetrics
>>> from ai4water.postprocessing.SeqMetrics import plot_metrics
>>> t = np.random.random((20, 1))
>>> p = np.random.random((20, 1))
>>> er = RegressionMetrics(t, p)
>>> all_errors = er.calculate_all()
>>> plot_metrics(all_errors, plot_type='bar', max_metrics_per_fig=50)
>>># or draw the radial plot
>>> plot_metrics(all_errors, plot_type='radial', max_metrics_per_fig=50)
"""
for idx, rng in enumerate(ranges):
assert rng[1] > rng[0], f'For range {idx}, second value: {rng[1]} is not greater than first value: {rng[0]}. '
assert len(rng) == 2, f"Range number {idx} has length {len(rng)}. It must be a tuple of length 2."
if exclude is None:
exclude = []
_metrics = metrics.copy()
for k in metrics.keys():
if k in exclude:
_metrics.pop(k)
assert plot_type in ['bar', 'radial'], f'plot_type must be either `bar` or `radial`.'
for _range in ranges:
plot_metrics_between(
_metrics,
*_range,
plot_type=plot_type,
max_metrics_per_fig=max_metrics_per_fig,
show=show,
save=save,
save_path=save_path, **kwargs)
return
def plot_metrics_between(
errors: dict,
lower: int,
upper: int,
plot_type: str = 'bar',
max_metrics_per_fig: int = 15,
save=False,
show=True,
save_path=None,
**kwargs):
zero_to_one = {}
for k, v in errors.items():
if v is not None:
if lower < v < upper:
zero_to_one[k] = v
st = 0
n = len(zero_to_one)
for i in np.array(np.linspace(0, n, int(n/max_metrics_per_fig)+1),
dtype=np.int32):
if i == 0:
pass
else:
en = i
d = take(st, en, zero_to_one)
if plot_type == 'radial':
plot_radial(d, lower, upper, save=save, show=show, save_path=save_path, **kwargs)
else:
plot_circular_bar(d, save=save, show=show, save_path=save_path, **kwargs)
st = i
return
def plot_radial(errors: dict, low: int, up: int, save=True, show=True, save_path=None, **kwargs):
"""Plots all the errors in errors dictionary. low and up are used to draw the limits of radial plot."""
if go is None:
print("can not plot radial plot because plotly is not installed.")
return
fill = kwargs.get('fill', None)
fillcolor = kwargs.get('fillcolor', None)
line = kwargs.get('line', None)
marker = kwargs.get('marker', None)
errors = OrderedDict(sorted(errors.items(), key=lambda kv: kv[1]))  # sort metrics by value for plotting
lower = round(np.min(list(errors.values())), 4)
upper = round(np.max(list(errors.values())), 4)
fig = go.Figure()
categories = list(errors.keys())
fig.add_trace(go.Scatterpolar(
r=list(errors.values()),
theta=categories, # angular coordinates
fill=fill,
fillcolor=fillcolor,
line=line,
marker=marker,
name='errors'
))
fig.update_layout(
title_text=f"Errors from {lower} to {upper}",
polar=dict(
radialaxis=dict(
visible=True,
range=[low, up]
)),
showlegend=False
)
if show:
    fig.show()
if save:
fname = f"radial_errors_from_{lower}_to_{upper}.png"
if save_path is not None:
fname = os.path.join(save_path, fname)
fig.write_image(fname)
return
def plot_circular_bar(
metrics: dict,
show=False,
save: bool = True,
save_path: str = '',
**kwargs):
"""
modified after https://www.python-graph-gallery.com/circular-barplot-basic
:param metrics:
:param show:
:param save:
:param save_path:
:param kwargs:
figsize:
linewidth:
edgecolor:
color:
:return:
"""
# initialize the figure
plt.close('all')
plt.figure(figsize=kwargs.get('figsize', (8, 12)))
ax = plt.subplot(111, polar=True)
plt.axis('off')
# Set the coordinates limits
# upperLimit = 100
lower_limit = 30
value = np.array(list(metrics.values()))
lower = round(np.min(list(metrics.values())), 4)
upper = round(np.max(list(metrics.values())), 4)
# Compute max and min in the dataset
_max = max(value) # df['Value'].max()
# Let's compute heights: they are a conversion of each item value in those new coordinates
# In our example, 0 in the dataset will be converted to the lowerLimit (10)
# The maximum will be converted to the upperLimit (100)
slope = (_max - lower_limit) / _max
heights = slope * value + lower_limit
# Compute the width of each bar. In total we have 2*Pi = 360°
width = 2 * np.pi / len(metrics)
# Compute the angle each bar is centered on:
indexes = list(range(1, len(metrics) + 1))
angles = [element * width for element in indexes]
# Draw bars
bars = ax.bar(
x=angles,
height=heights,
width=width,
bottom=lower_limit,
linewidth=kwargs.get('linewidth', 2),
edgecolor=kwargs.get('edgecolor', "white"),
color=kwargs.get('color', "#61a4b2"),
)
# little space between the bar and the label
label_padding = 4
metric_names = {
'r2': "$R^2$",
'r2_mod': "$R^2$ mod",
'adjusted_r2': 'adjusted $R^2$',
# 'nse': "NSE"
}
# Add labels
for bar, angle, label1, label2 in zip(bars, angles, metrics.keys(), metrics.values()):
label1 = metric_names.get(label1, label1)
label = f'{label1} {round(label2, 4)}'
# Labels are rotated. Rotation must be specified in degrees :(
rotation = np.rad2deg(angle)
# Flip some labels upside down
if angle >= np.pi / 2 and angle < 3 * np.pi / 2:
alignment = "right"
rotation = rotation + 180
else:
alignment = "left"
# Finally add the labels
ax.text(
x=angle,
y=lower_limit + bar.get_height() + label_padding,
s=label,
ha=alignment,
va='center',
rotation=rotation,
rotation_mode="anchor")
if save:
fname = f"{len(metrics)}_bar_errors_from_{lower}_to_{upper}.png"
if save_path is not None:
fname = os.path.join(save_path, fname)
plt.savefig(fname, dpi=400, bbox_inches='tight')
if show:
plt.show()
return
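# Illustrative numbers for the value -> bar-height mapping used above:
# with lower_limit = 30 and metric values [40, 80, 100],
#   slope = (100 - 30) / 100 = 0.7
#   heights = 0.7 * [40, 80, 100] + 30 = [58, 86, 100]
# so a value of 0 would sit at the lower_limit while the maximum keeps its
# own magnitude.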
def plot1d(true, predicted, save=True, name="plot", show=False):
_, axis = plt.subplots()
axis.plot(np.arange(len(true)), true, label="True")
axis.plot(np.arange(len(predicted)), predicted, label="Predicted")
axis.legend(loc="best")
if save:
plt.savefig(name, dpi=300, bbox_inches='tight')
if show:
plt.show()
plt.close('all')
return
def _foo(denominator, numerator):
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(1)
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
return output_scores
def _mean_tweedie_deviance(y_true, y_pred, power=0, weights=None):
# copying from
# https://github.com/scikit-learn/scikit-learn/blob/95d4f0841d57e8b5f6b2a570312e9d832e69debc/sklearn/metrics/_regression.py#L659
message = ("Mean Tweedie deviance error with power={} can only be used on "
.format(power))
if power < 0:
# 'Extreme stable', y_true any real number, y_pred > 0
if (y_pred <= 0).any():
raise ValueError(message + "strictly positive y_pred.")
dev = 2 * (np.power(np.maximum(y_true, 0), 2 - power)
/ ((1 - power) * (2 - power))
- y_true * np.power(y_pred, 1 - power) / (1 - power)
+ np.power(y_pred, 2 - power) / (2 - power))
elif power == 0:
# Normal distribution, y_true and y_pred any real number
dev = (y_true - y_pred) ** 2
elif power < 1:
raise ValueError("Tweedie deviance is only defined for power<=0 and "
"power>=1.")
elif power == 1:
# Poisson distribution, y_true >= 0, y_pred > 0
if (y_true < 0).any() or (y_pred <= 0).any():
raise ValueError(message + "non-negative y_true and strictly "
"positive y_pred.")
dev = 2 * (xlogy(y_true, y_true / y_pred) - y_true + y_pred)
elif power == 2:
# Gamma distribution, y_true and y_pred > 0
if (y_true <= 0).any() or (y_pred <= 0).any():
raise ValueError(message + "strictly positive y_true and y_pred.")
dev = 2 * (np.log(y_pred / y_true) + y_true / y_pred - 1)
else:
if power < 2:
# 1 < p < 2 is Compound Poisson, y_true >= 0, y_pred > 0
if (y_true < 0).any() or (y_pred <= 0).any():
raise ValueError(message + "non-negative y_true and strictly "
"positive y_pred.")
else:
if (y_true <= 0).any() or (y_pred <= 0).any():
raise ValueError(message + "strictly positive y_true and "
"y_pred.")
dev = 2 * (np.power(y_true, 2 - power) / ((1 - power) * (2 - power))
- y_true * np.power(y_pred, 1 - power) / (1 - power)
+ np.power(y_pred, 2 - power) / (2 - power))
return float(np.average(dev, weights=weights))
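# Illustrative sketch: with power=0 the deviance reduces to the squared error,
# e.g. _mean_tweedie_deviance(np.array([1.0, 2.0]), np.array([1.5, 1.0]), power=0)
# gives mean([0.25, 1.0]) = 0.625, which should match
# sklearn.metrics.mean_tweedie_deviance for the same inputs.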
def _geometric_mean(a, axis=0, dtype=None):
""" Geometric mean """
if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype: # Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return float(np.exp(log_a.mean(axis=axis)))
def listMethods(cls):
return set(x for x, y in cls.__dict__.items()
if isinstance(y, (FunctionType, classmethod, staticmethod)))
def listParentMethods(cls):
return set(itertools.chain.from_iterable(
listMethods(c).union(listParentMethods(c)) for c in cls.__bases__))
def list_subclass_methods(cls, is_narrow, ignore_underscore=True, additional_ignores=None):
"""Finds all methods of a child class"""
methods = listMethods(cls)
if is_narrow:
parent_methods = listParentMethods(cls)
methods = set(cls for cls in methods if not (cls in parent_methods))
if additional_ignores is not None:
methods = methods - set(additional_ignores)
if ignore_underscore:
methods = set(cls for cls in methods if not cls.startswith('_'))
return methods
def msg(module, module_type="class"):
return f"""
{module} {module_type} has been moved to its own repository called SeqMetrics
Please install it using 'pip install SeqMetrics' and then import {module}
as
from SeqMetrics import {module}
""" | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/SeqMetrics/utils.py | utils.py |
from ai4water.backend import np, pd, os
class ExplainerMixin(object):
def __init__(
self,
path,
data,
features,
save=True,
show=True,
):
if not os.path.exists(path):
os.makedirs(path)
self.path = path
self.data = data
self.features = features
self.save = save
self.show = show
@property
def data_is_2d(self):
if isinstance(self.data, np.ndarray) and self.data.ndim == 2:
return True
elif isinstance(self.data, pd.DataFrame):
return True
else:
return False
@property
def data_is_3d(self):
if isinstance(self.data, np.ndarray) and self.data.ndim == 3:
return True
return False
@property
def single_source(self):
if isinstance(self.data, list) and len(self.data) > 1:
return False
else:
return True
@property
def features(self):
return self._features
@features.setter
def features(self, features):
if self.data_is_2d:
if type(self.data) == pd.DataFrame:
features = self.data.columns.to_list()
elif features is None:
features = [f"Feature {i}" for i in range(self.data.shape[-1])]
else:
assert isinstance(features, list) and len(features) == self.data.shape[-1], f"""
features must be given as list of length {self.data.shape[-1]}
but are of len {len(features)}
"""
features = features
elif not self.single_source and features is None:
features = []
for data in self.data:
if isinstance(data, pd.DataFrame):
_features = data.columns.to_list()
else:
_features = [f"Feature {i}" for i in range(data.shape[-1])]
features.append(_features)
elif self.data_is_3d and features is None:
features = [f"Feature {i}" for i in range(self.data.shape[-1])]
self._features = features
@property
def unrolled_features(self):
# returns the possible names of features if unrolled over time
if not self.data_is_2d and self.single_source:
features = self.features
if features is None:
features = [f"Feature_{i}" for i in range(self.data.shape[-1])]
lookback = self.data.shape[1]
features = [[f"{f}_{i}" for f in features] for i in range(lookback)]
features = [item for sublist in features for item in sublist]
else:
features = None
return features

# ---- end of file: ai4water/postprocessing/explain/_explain.py ----
import gc
from typing import Union, Callable, List
import scipy.stats as stats
from SeqMetrics import RegressionMetrics, ClassificationMetrics
from ai4water.backend import np, plt, os, easy_mpl
from ._explain import ExplainerMixin
from ai4water.utils.utils import reset_seed, ERROR_LABELS
imshow = easy_mpl.imshow
bar_chart = easy_mpl.bar_chart
boxplot = easy_mpl.boxplot
class PermutationImportance(ExplainerMixin):
"""
permutation importance answers the question, how much the model's prediction
performance is influenced by a feature? It defines the feature importance as
the decrease in model performance when one feature is corrupted
(Molnar_ et al., 2021).
Attributes:
importances
Example
-------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.postprocessing.explain import PermutationImportance
>>> data = busan_beach()
>>> model = Model(model="XGBRegressor", verbosity=0)
>>> model.fit(data=data)
>>> x_val, y_val = model.validation_data()
... # initialize the PermutationImportance class
>>> pimp = PermutationImportance(model.predict, x_val, y_val.reshape(-1,))
>>> fig = pimp.plot_1d_pimp()
.. _Molnar:
https://christophm.github.io/interpretable-ml-book/feature-importance.html#feature-importance
"""
def __init__(
self,
model: Callable,
inputs: Union[np.ndarray, List[np.ndarray]],
target: np.ndarray,
scoring: Union[str, Callable] = "r2",
n_repeats: int = 14,
noise: Union[str, np.ndarray] = None,
cat_map:dict = None,
use_noise_only: bool = False,
feature_names: list = None,
path: str = None,
seed: int = None,
weights=None,
save: bool = True,
show: bool = True,
**kwargs
):
"""
initiates the class and calculates the importances
Arguments:
model:
the trained model object which is callable e.g. if you have Keras
or sklearn model then you should pass `model.predict` instead
of `model`.
inputs:
arrays or list of arrays which will be given as input to `model`
target:
the true outputs or labels for corresponding `inputs`
It must be a 1-dimensional numpy array
scoring:
the performance metric to use. It can be any metric from RegressionMetrics_ or
ClassificationMetrics_ or a callable. If callable, then it must take
true and predicted as input and return a float as output.
n_repeats:
number of times the permutation for each feature is performed. Number
of calls to the `model` will be `num_features * n_repeats`
noise:
The noise to add in the feature. It should be either an array of noise
or a string of scipy distribution name_ defining noise.
use_noise_only:
If True, the original feature will be replaced by the noise.
weights:
feature_names:
names of features
seed:
random seed for reproducibility. Permutation importance is
strongly affected by random seed. Therefore, if you want to
reproduce your results, set this value to some integer value.
path:
path to save the plots
show:
whether to show the plot or not
save:
whether to save the plot or not
kwargs:
any additional keyword arguments for `model`
.. _name:
https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions
.. _RegressionMetrics:
https://seqmetrics.readthedocs.io/en/latest/rgr.html#regressionmetrics
.. _ClassificationMetrics:
https://seqmetrics.readthedocs.io/en/latest/cls.html#classificationmetrics
"""
assert callable(model), f"model must be callable"
self.model = model
if inputs.__class__.__name__ in ["Series", "DataFrame"]:
inputs = inputs.values
self.x = inputs
self.y = target
self.scoring = scoring
self.noise = noise
self.cat_map = cat_map
if use_noise_only:
if noise is None:
raise ValueError("you must define the noise in order to replace it with feature")
self.use_noise_only = use_noise_only
self.n_repeats = n_repeats
self.weights = weights
self.kwargs = kwargs
self.importances = None
super().__init__(features=feature_names,
data=inputs,
path=path or os.getcwd(),
show=show,
save=save
)
self.seed = seed
self.base_score = self._base_score()
self._calculate(**kwargs)
@property
def noise(self):
return self._noise
@noise.setter
def noise(self, x):
if x is not None:
if isinstance(x, str):
x = getattr(stats, x)().rvs(len(self.y))
else:
assert isinstance(x, np.ndarray) and len(x) == len(self.y)
self._noise = x
def _base_score(self) -> float:
"""calculates the base score"""
return self._score(self.model(self.x, **self.kwargs))
def _score(self, pred) -> float:
"""given the prediction, it calculates the score"""
if callable(self.scoring):
return self.scoring(self.y, pred)
else:
if hasattr(RegressionMetrics, self.scoring):
errors = RegressionMetrics(self.y, pred)
else:
errors = ClassificationMetrics(self.y, pred)
return getattr(errors, self.scoring)()
def _calculate(
self,
**kwargs
):
"""Calculates permutation importance using self.x"""
if self.single_source:
if self.x.ndim == 2: # 2d input
results = self._permute_importance_2d(self.x, **kwargs)
else:
results = {}
for lb in range(self.x.shape[1]):
results[lb] = self._permute_importance_2d(self.x,
time_step=lb,
**kwargs)
else:
results = {}
for idx in range(len(self.x)):
if self.x[idx].ndim == 2: # current input is 2d
results[idx] = self._permute_importance_2d(
self.x,
idx,
**kwargs
)
elif self.x[idx].ndim == 3: # current input is 3d
_results = {}
for lb in range(self.x[idx].shape[1]):
_results[lb] = self._permute_importance_2d(self.x,
inp_idx=idx,
time_step=lb,
**kwargs)
results[idx] = _results
else:
raise NotImplementedError
setattr(self, 'importances', results)
return results
def plot_as_heatmap(
self,
annotate=True,
**kwargs
):
"""plots the permutation importance as heatmap.
The input data must be 3d.
Arguments:
annotate:
whether to annotate the heat map with
kwargs:
any keyword arguments for imshow_ function.
.. _imshow:
https://easy-mpl.readthedocs.io/en/latest/#module-4
"""
assert self.data_is_3d, f"data must be 3d but it has shape {self.x.shape}"
imp = np.stack([np.mean(v, axis=1) for v in self.importances.values()])
lookback = imp.shape[0]
ytick_labels = [f"t-{int(i)}" for i in np.linspace(lookback - 1, 0, lookback)]
im = imshow(
imp,
yticklabels=ytick_labels,
xticklabels=self.features if len(self.features) <= 14 else None,
ax_kws=dict(
ylabel="Lookack steps",
xlabel="Input Features",
title=f"Base Score {round(self.base_score, 3)} with {ERROR_LABELS[self.scoring]}",
),
annotate=annotate,
colorbar=True,
show=False,
**kwargs
)
axes = im.axes
axes.set_xticklabels(axes.get_xticklabels(), rotation=90)
if self.show:
plt.show(
)
return axes
def plot_1d_pimp(
self,
plot_type:str = "boxplot",
**kwargs
) -> plt.Axes:
"""Plots the 1d permutation importance either as box-plot or as bar_chart
Arguments
---------
plot_type : str, optional
either boxplot or barchart
**kwargs :
keyword arguments either for boxplot or bar_chart
Returns
-------
matplotlib AxesSubplot
"""
if isinstance(self.importances, np.ndarray):
if self.cat_map is not None:
feats = make_feature_list(self.features, self.cat_map)
else:
feats = self.features
ax = self._plot_pimp(self.importances,
feats,
plot_type=plot_type,
**kwargs)
else:
for idx, importance in enumerate(self.importances.values()):
if self.data_is_3d:
features = self.features
else:
features = self.features[idx]
ax = self._plot_pimp(importance,
features,
plot_type=plot_type,
name=idx,
**kwargs
)
plt.close('all')
return ax
def _permute_importance_2d(
self,
inputs,
inp_idx=None,
time_step=None,
**kwargs
):
"""
calculates permutation importance by permuting columns in inputs
which is supposed to be 2d array. args are optional inputs to model.
"""
original_inp_idx = inp_idx
if inp_idx is None:
inputs = [inputs]
inp_idx = 0
permuted_x = inputs[inp_idx].copy()
feat_dim = 1 # feature dimension (0, 1, 2)
if time_step is not None:
feat_dim = 2
col_indices = list(range(permuted_x.shape[feat_dim]))
if self.cat_map is not None:
col_indices = create_index(col_indices, self.cat_map)
# empty container to keep results
# (num_features, n_repeats)
results = np.full((len(col_indices), self.n_repeats), np.nan)
# todo, instead of having two for loops, we can perturb the
# inputs at once and concatenate
# them as one input and thus call the `model` only once
for col_idx, col_index in enumerate(col_indices):
# instead of calling the model/func for each n_repeat, prepare the data
# for all n_repeats and stack it and call the model/func once.
# This reduces calls to model from num_inputs * n_repeats -> num_inputs
permuted_inputs = np.full((len(permuted_x)*self.n_repeats, *permuted_x.shape[1:]), np.nan)
st, en = 0, len(permuted_x)
rng = np.random.default_rng(self.seed)
for n_round in range(self.n_repeats):
# sklearn sets the random state before permuting each feature
# also sklearn sets the RandomState inside a function, therefore
# the results from this function will not be reproducible with
# sklearn and vice versa
# We should make a fresh copy because the permuted_x from previous
# iteration has been modified
permuted_x_ = permuted_x.copy()
if time_step is None:
permuted_feature = rng.permutation(
permuted_x_[:, col_index])
else:
permuted_feature = rng.permutation(
permuted_x_[:, time_step, col_index]
)
if self.noise is not None:
if self.use_noise_only:
permuted_feature = self.noise
else:
permuted_feature += self.noise
if time_step is None:
permuted_x_[:, col_index] = permuted_feature
else:
permuted_x_[:, time_step, col_index] = permuted_feature
permuted_inputs[st:en] = permuted_x_
st = en
en += len(permuted_x)
results[col_idx] = self._eval(original_inp_idx,
inputs,
inp_idx,
permuted_inputs,
len(permuted_x),
**kwargs)
if self.scoring in ["mse", "rmse", "rmsle", "mape"]:
results = self.base_score + results
else:
# permutation importance is how much performance decreases by permutation
results = self.base_score - results
gc.collect()
if time_step:
print(f"finished for time_step {time_step}")
return results
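# Note on the batching above: for one feature with, say, 100 rows and
# n_repeats = 3, `permuted_inputs` has shape (300, ...) holding the three
# shuffled copies stacked row-wise, so the wrapped model is called once per
# feature instead of once per (feature, repeat); _eval() then slices the
# prediction back into three blocks of 100 and scores each block separately.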
def _permute_importance_2d1(
self,
inputs
):
"""
todo: in order to reproduce sklearn's results, use this function
"""
def _func(_inputs, col_idx):
permuted_x = _inputs.copy()
scores = np.full(self.n_repeats, np.nan)
random_state = np.random.RandomState(self.seed)
for n_round in range(self.n_repeats):
perturbed_feature = permuted_x[:, col_idx]
random_state.shuffle(perturbed_feature)
if self.noise is not None:
if self.use_noise_only:
perturbed_feature = self.noise
else:
perturbed_feature += self.noise
permuted_x[:, col_idx] = perturbed_feature
prediction = self.model(permuted_x)
scores[n_round] = self._score(prediction)
return scores
# empty container to keep results
results = np.full((inputs.shape[1], self.n_repeats), np.nan)
for col_index in range(inputs.shape[1]):
results[col_index, :] = _func(inputs, col_index)
# permutation importance is how much performance decreases by permutation
results = self.base_score - results
return results
def _plot_pimp(
self,
imp,
features,
axes=None,
name=None,
plot_type="boxplot",
**kwargs
):
ax_kws = dict(xlabel=ERROR_LABELS.get(self.scoring, self.scoring),
title=f"Base Score {round(self.base_score, 3)}")
importances_mean = np.mean(imp, axis=1)
perm_sorted_idx = importances_mean.argsort()
if plot_type == "boxplot":
axes, _ = boxplot(
imp[perm_sorted_idx].T, # (num_features, n_repeats) -> (n_repeats, num_features)
vert=False,
labels=np.array(features)[perm_sorted_idx],
ax=axes,
show=False,
ax_kws=ax_kws,
**kwargs
)
else:
axes = bar_chart(importances_mean, features, show=False, ax=axes,
ax_kws=ax_kws, sort=True, **kwargs)
if self.save:
name = name or ''
fname = os.path.join(self.path, f"{plot_type}_{name}_{self.n_repeats}_{self.scoring}")
plt.savefig(fname, bbox_inches="tight")
if self.show:
plt.show()
return axes
def _eval(self,
original_inp_idx,
inputs,
inp_idx,
permuted_inp,
batch_size,
**kwargs):
"""batch size here refers to number of examples in one `n_round`."""
# don't disturb the original input data, create new one
new_inputs = [None]*len(inputs)
new_inputs[inp_idx] = permuted_inp
if original_inp_idx is None: # inputs were not list so unpack the list
prediction = self.model(*new_inputs, **kwargs)
else:
for idx, inp in enumerate(inputs):
if idx != inp_idx:
new_inputs[idx] = np.concatenate([inp for _ in range(self.n_repeats)])
prediction = self.model(new_inputs, **kwargs)
st, en = 0, batch_size
scores = np.full(self.n_repeats, np.nan)
for n_round in range(self.n_repeats):
pred = prediction[st:en]
scores[n_round] = self._score(pred)
st = en
en += batch_size
gc.collect()
return scores
def create_index(index:list, cat_mapper:dict)->list:
"""
mp = [[1,2,3], [8,9,10]]
ci = [0,1,2,3,4,5,6,7,8,9,10]
result will be
[0, [1,2,3], 4, 5,6,7,[8,9,10]]
"""
mp = list(cat_mapper.values())
ci = index.copy()
for sub_list in mp:
for element in sub_list:
if element in ci:
ci.insert(ci.index(element), sub_list)
for i in sub_list:
ci.remove(i)
break
return ci
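# Example (doctest-style sketch):
#   >>> create_index([0, 1, 2, 3, 4], {"cat": [1, 2]})
#   [0, [1, 2], 3, 4]
# i.e. indices that belong to one (e.g. one-hot encoded) categorical feature
# are grouped so that they get permuted together.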
def make_feature_list(featur_list:list, cat_map:dict)->list:
"""
mp = [[1,2,3], [8,9,10]]
ci = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']
out = ['a', 'x', 'e', 'f', 'g', 'h', 'y']
"""
featur_list = featur_list.copy()
for key, index_list in cat_map.items():
for index in index_list:
featur_list.pop(index)
featur_list.insert(index, key)
return list(dict.fromkeys(featur_list))

# ---- end of file: ai4water/postprocessing/explain/_permutation_importance.py ----
from typing import Union, Callable, List
try:
import shap
from shap import Explanation
except ModuleNotFoundError:
shap = None
Explanation = None
import scipy as sp
try:
import tensorflow.keras.backend as K
except ModuleNotFoundError:
K = None
from ._explain import ExplainerMixin
from .utils import convert_ai4water_model
from ai4water.backend import sklearn_models, np, pd, os, plt, easy_mpl
class ShapExplainer(ExplainerMixin):
"""
Wrapper around SHAP `explainers` and `plots` to draw and save all the plots
for a given model.
Attributes:
features :
train_summary : only for KernelExplainer
explainer :
shap_values :
Methods
--------
- summary_plot
- force_plot_single_example
- dependence_plot_single_feature
- force_plot_all
Examples:
>>> from ai4water.postprocessing import ShapExplainer
>>> from sklearn.model_selection import train_test_split
>>> from sklearn import linear_model
>>> import shap
...
>>> X,y = shap.datasets.diabetes()
>>> X_train,X_test,y_train,y_test = train_test_split(X, y, test_size=0.2, random_state=0)
>>> lin_regr = linear_model.LinearRegression()
>>> lin_regr.fit(X_train, y_train)
>>> explainer = ShapExplainer(lin_regr, X_test, X_train, num_means=10)
>>> explainer()
"""
allowed_explainers = [
"Explainer",
"DeepExplainer",
"TreeExplainer",
"KernelExplainer",
"LinearExplainer",
"AdditiveExplainer",
"GPUTreeExplainer",
"GradientExplainer",
"PermutationExplainer",
"SamplingExplainer",
"PartitionExplainer"
]
def __init__(
self,
model,
data: Union[np.ndarray, pd.DataFrame, List[np.ndarray]],
train_data: Union[np.ndarray, pd.DataFrame, List[np.ndarray]] = None,
explainer: Union[str, Callable] = None,
num_means: int = 10,
path: str = None,
feature_names: list = None,
framework: str = None,
layer: Union[int, str] = None,
save: bool = True,
show: bool = True,
):
"""
Args:
model :
a Model/regressor/classifier from sklearn/xgboost/catboost/LightGBM/tensorflow/pytorch/ai4water
The model must have a `predict` method.
data :
Data on which to make interpretation. Its dimension should be
same as that of training data. It can be either training or test
data
train_data :
The data on which the `model` was trained. It is used to
get train_summary. It can a numpy array or a pandas DataFrame.
Only required for scikit-learn based models.
explainer : str
the explainer to use. If not given, the explainer will be inferred.
num_means : int
Number of means, used in `shap.kmeans` to calculate the train_summary.
Only used when explainer is "KernelExplainer"
path : str
path to save the plots. By default, plots will be saved in current
working directory
feature_names : list
Names of features. Should only be given if train/test data is numpy
array.
framework : str
either "DL" or "ML". Here "DL" shows that the `model` is a deep
learning or neural network based model and "ML" represents other
models. For "DL" the explainer will be either "DeepExplainer" or
"GradientExplainer". If not given, it will be inferred. In such
a case "DeepExplainer" will be prioritized over "GradientExplainer"
for DL frameworks and "TreeExplainer" will be prioritized for "ML"
frameworks.
layer : Union[int, str]
only relevant when framework is "DL", i.e., when the model consists of layers
of neural networks.
show:
whether to show the plot or not
save:
whether to save the plot or not
"""
assert shap is not None, f"""
shap package must be installed to use this class.
please install shap e.g with pip install shap
"""
test_data = maybe_to_dataframe(data, feature_names)
train_data = maybe_to_dataframe(train_data, feature_names)
super(ShapExplainer, self).__init__(path=path or os.getcwd(),
data=test_data,
features=feature_names,
save=save,
show=show
)
if train_data is None:
self._check_data(test_data)
else:
self._check_data(train_data, test_data)
model, framework, explainer, model_name = convert_ai4water_model(model,
framework,
explainer)
self.is_sklearn = True
if model_name not in sklearn_models:
if model_name in ["XGBRegressor",
"XGBClassifier",
"LGBMRegressor",
"LGBMClassifier",
"CatBoostRegressor",
"CatBoostClassifier"
"XGBRFRegressor"
"XGBRFClassifier"
]:
self.is_sklearn = False
elif not self._is_dl(model):
raise ValueError(f"{model.__class__.__name__} is not a valid model model")
self._framework = self.infer_framework(model, framework, layer, explainer)
self.model = model
self.data = test_data
self.layer = layer
self.features = feature_names
self.explainer = self._get_explainer(explainer, train_data=train_data, num_means=num_means)
self.shap_values = self.get_shap_values(test_data)
@staticmethod
def _is_dl(model):
if hasattr(model, "count_params") or hasattr(model, "named_parameters"):
return True
return False
@property
def layer(self):
return self._layer
@layer.setter
def layer(self, x):
if x is not None:
if not isinstance(x, str):
assert isinstance(x, int), f"layer must either b string or integer"
assert x <= len(self.model.layers) # todo, what about pytorch
self._layer = x
def map2layer(self, x, layer):
feed_dict = dict(zip([self.model.layers[0].input], [x.copy()]))
import tensorflow as tf
if int(tf.__version__[0]) < 2:
sess = K.get_session()
else:
sess = tf.compat.v1.keras.backend.get_session()
if isinstance(layer, int):
return sess.run(self.model.layers[layer].input, feed_dict)
else:
return sess.run(self.model.get_layer(layer).input, feed_dict)
def infer_framework(self, model, framework, layer, explainer):
if framework is not None:
inf_framework = framework
elif self._is_dl(model):
inf_framework = "DL"
elif isinstance(explainer, str) and explainer in ("DeepExplainer", "GradientExplainer"):
inf_framework = "DL"
elif explainer.__class__.__name__ in ("DeepExplainer", "GradientExplainer"):
inf_framework = "DL"
else:
inf_framework = "ML"
assert inf_framework in ("ML", "DL")
if inf_framework != "DL":
assert layer is None
if inf_framework == "DL" and isinstance(explainer, str):
assert explainer in ("DeepExplainer",
"GradientExplainer",
"PermutationExplainer"), f"invalid explainer {inf_framework}"
return inf_framework
def _get_explainer(self,
explainer: Union[str, Callable],
num_means,
train_data
):
if explainer is not None:
if callable(explainer):
return explainer
assert isinstance(explainer, str), f"explainer should be callable or string but" \
f" it is {explainer.__class__.__name__}"
assert explainer in self.allowed_explainers, f"{explainer} is not a valid explainer"
if explainer == "KernelExplainer":
explainer = self._get_kernel_explainer(train_data, num_means)
elif explainer == "DeepExplainer":
explainer = self._get_deep_explainer()
elif explainer == "GradientExplainer":
explainer = self._get_gradient_explainer()
elif explainer == "PermutationExplainer":
explainer = shap.PermutationExplainer(self.model, self.data)
else:
explainer = getattr(shap, explainer)(self.model)
else: # explainer is not given explicitly, we need to infer it
explainer = self._infer_explainer_to_use(train_data, num_means)
return explainer
def _get_kernel_explainer(self, data, num_means):
assert isinstance(num_means,
int), f'num_means should be integer but given value is of type {num_means.__class__.__name__}'
if data is None:
raise ValueError("Provide train_data in order to use KernelExplainer.")
self.train_summary = shap.kmeans(data, num_means)
explainer = shap.KernelExplainer(self.model.predict, self.train_summary)
return explainer
def _infer_explainer_to_use(self, train_data, num_means):
"""Tries to infer explainer to use from the type of model."""
# todo, Fig 3 of Lundberg's Nature MI paper shows that TreeExplainer
# performs better than KernelExplainer, so try to use supports_model_with_masker
if self.model.__class__.__name__ in ["XGBRegressor", "LGBMRegressor", "CatBoostRegressor",
"XGBRFRegressor"]:
explainer = shap.TreeExplainer(self.model)
elif self.model.__class__.__name__ in sklearn_models:
explainer = self._get_kernel_explainer(train_data, num_means)
elif self._framework == "DL":
explainer = self._get_deep_explainer()
else:
raise ValueError(f"Can not infer explainer for model {self.model.__class__.__name__}."
f" Plesae specify explainer by using `explainer` keyword argument")
return explainer
def _get_deep_explainer(self):
data = self.data.values if isinstance(self.data, pd.DataFrame) else self.data
return getattr(shap, "DeepExplainer")(self.model, data)
def _get_gradient_explainer(self):
if self.layer is None:
# GradientExplainer is also possible without specifying a layer
return shap.GradientExplainer(self.model, self.data)
if isinstance(self.layer, int):
return shap.GradientExplainer((self.model.layers[self.layer].input, self.model.layers[-1].output),
self.map2layer(self.data, self.layer))
else:
return shap.GradientExplainer((self.model.get_layer(self.layer).input, self.model.layers[-1].output),
self.map2layer(self.data, self.layer))
def _check_data(self, *data):
if self.single_source:
for d in data:
assert type(d) == np.ndarray or type(d) == pd.DataFrame, f"""
data must be numpy array or pandas dataframe but it is of type {d.__class__.__name__}"""
assert len(set([d.ndim for d in data])) == 1, "train and test data should have same ndim"
assert len(set([d.shape[-1] for d in data])) == 1, "train and test data should have same input features"
assert len(set([type(d) for d in data])) == 1, "train and test data should be of same type"
return
def get_shap_values(self, data, **kwargs):
if self.explainer.__class__.__name__ in ["Permutation"]:
return self.explainer(data)
elif self._framework == "DL":
return self._shap_values_dl(data, **kwargs)
return self.explainer.shap_values(data)
def _shap_values_dl(self, data, ranked_outputs=None, **kwargs):
"""Gets the SHAP values"""
data = data.values if isinstance(data, pd.DataFrame) else data
if self.explainer.__class__.__name__ == "Deep":
shap_values = self.explainer.shap_values(data, ranked_outputs=ranked_outputs, **kwargs)
elif isinstance(self.explainer, shap.GradientExplainer) and self.layer is None:
shap_values = self.explainer.shap_values(data, ranked_outputs=ranked_outputs, **kwargs)
else:
shap_values = self.explainer.shap_values(self.map2layer(data, self.layer),
ranked_outputs=ranked_outputs, **kwargs)
if ranked_outputs:
shap_values, indexes = shap_values
return shap_values
def __call__(self,
force_plots=True,
plot_force_all=False,
dependence_plots=False,
beeswarm_plots=False,
heatmap=False,
):
"""Draws and saves all the plots for a given sklearn model in the path.
plot_force_all is set to False by default because it is causing
Process finished error due. Avoiding this error is a complex function
of scipy and numba versions.
"""
if dependence_plots:
for feature in self.features:
self.dependence_plot_single_feature(feature, f"dependence_plot_{feature}")
if force_plots:
for i in range(self.data.shape[0]):
self.force_plot_single_example(i, f"force_plot_{i}")
if beeswarm_plots:
self.beeswarm_plot()
if plot_force_all:
self.force_plot_all("force_plot_all")
if heatmap:
self.heatmap()
self.summary_plot("summary_plot")
return
def summary_plot(
self,
plot_type: str = None,
name: str = "summary_plot",
**kwargs
):
"""
Plots the `summary <https://shap-lrjball.readthedocs.io/en/latest/generated/shap.summary_plot.html#shap.summary_plot>`_
plot of SHAP package.
Arguments:
plot_type : str,
either "bar", or "violen" or "dot"
name:
name of saved file
kwargs:
any keyword arguments to shap.summary_plot
"""
def _summary_plot(_shap_val, _data, _features, _name):
plt.close('all')
shap.summary_plot(_shap_val, _data, show=False, plot_type=plot_type,
feature_names=_features,
**kwargs)
if self.save:
plt.savefig(os.path.join(self.path, _name + " _bar"), dpi=300,
bbox_inches="tight")
if self.show:
plt.show()
return
shap_vals = self.shap_values
if isinstance(shap_vals, list) and len(shap_vals) == 1:
shap_vals = shap_vals[0]
data = self.data
if self.single_source:
if data.ndim == 3:
assert shap_vals.ndim == 3
for lookback in range(data.shape[1]):
_summary_plot(shap_vals[:, lookback], data[:, lookback], self.features, _name=f"{name}_{lookback}")
else:
_summary_plot(shap_vals, data, self.features, name)
else:
# data is a list of data sources
for idx, _data in enumerate(data):
if _data.ndim == 3:
for lb in range(_data.shape[1]):
_summary_plot(shap_vals[idx][:, lb], _data[:, lb], self.features[idx],
_name=f"{name}_{idx}_{lb}")
else:
_summary_plot(shap_vals[idx], _data, self.features[idx], _name=f"{name}_{idx}")
return
def force_plot_single_example(
self,
idx:int,
name=None,
**force_kws
):
"""Draws force_plot_
for a single example/row/sample/instance/data point.
If the data is 3d and shap values are 3d then they are unrolled/flattened
before plotting
Arguments:
idx:
index of exmaple to use. It can be any value >=0
name:
name of saved file
force_kws : any keyword argument for force plot
Returns:
plotter object
.. _force_plot:
https://shap.readthedocs.io/en/latest/generated/shap.plots.force.html
"""
shap_vals = self.shap_values
if isinstance(shap_vals, list) and len(shap_vals) == 1:
shap_vals = shap_vals[0]
shap_vals = shap_vals[idx]
if type(self.data) == np.ndarray:
data = self.data[idx]
else:
data = self.data.iloc[idx, :]
if self.explainer.__class__.__name__ == "Gradient":
expected_value = [0]
else:
expected_value = self.explainer.expected_value
features = self.features
if data.ndim == 2 and shap_vals.ndim == 2: # input was 3d i.e. ml model uses 3d input
features = self.unrolled_features
expected_value = expected_value[0] # todo
shap_vals = shap_vals.reshape(-1,)
data = data.reshape(-1, )
plt.close('all')
plotter = shap.force_plot(
expected_value,
shap_vals,
data,
feature_names=features,
show=False,
matplotlib=True,
**force_kws
)
if self.save:
name = name or f"force_plot_{idx}"
plotter.savefig(os.path.join(self.path, name), dpi=300, bbox_inches="tight")
if self.show:
plotter.show()
return plotter
def dependence_plot_all_features(self, **dependence_kws):
"""dependence plot for all features"""
for feature in self.features:
self.dependence_plot_single_feature(feature, f"dependence_plot_{feature}",
**dependence_kws)
return
def dependence_plot_single_feature(self, feature, name="dependence_plot", **kwargs):
"""dependence_ plot for a single feature. See this_ .
.. _dependence:
https://slundberg.github.io/shap/notebooks/plots/dependence_plot.html
.. _this:
https://shap-lrjball.readthedocs.io/en/docs_update/generated/shap.dependence_plot.html
"""
plt.close('all')
if len(name) > 150: # matplotlib raises error if the length of filename is too large
name = name[0:150]
shap_values = self.shap_values
if isinstance(shap_values, list) and len(shap_values) == 1:
shap_values = shap_values[0]
shap.dependence_plot(feature,
shap_values,
self.data,
show=False,
**kwargs)
if self.save:
plt.savefig(os.path.join(self.path, name), dpi=300, bbox_inches="tight")
if self.show:
plt.show()
return
def force_plot_all(self, name="force_plot.html", save=True, show=True, **force_kws):
"""draws force plot for all examples in the given data and saves it in an html"""
# the following scipy versions cause kernel stoppage when calculating the force plot
if sp.__version__ in ["1.4.1", "1.5.2", "1.7.1"]:
print(f"force plot can not be plotted for scipy version {sp.__version__}. Please change your scipy")
return
shap_values = self.shap_values
if isinstance(shap_values, list) and len(shap_values) == 1:
shap_values = shap_values[0]
plt.close('all')
plot = shap.force_plot(self.explainer.expected_value, shap_values, self.data, **force_kws)
if save:
shap.save_html(os.path.join(self.path, name), plot)
return
def waterfall_plot_all_examples(
self,
name: str = "waterfall",
**waterfall_kws
):
"""Plots the waterfall_ plot of SHAP package
It plots for all the examples/instances from test_data.
.. _waterfall:
https://shap.readthedocs.io/en/latest/generated/shap.plots.waterfall.html
"""
for i in range(len(self.data)):
self.waterfall_plot_single_example(i, name=name, **waterfall_kws)
return
def waterfall_plot_single_example(
self,
example_index: int,
name: str = "waterfall",
max_display: int = 10,
):
"""draws and saves waterfall_ plot
for one example.
The waterfall plots are based upon SHAP values and show the
contribution by each feature in model's prediction. It shows which
feature pushed the prediction in which direction. They answer the
question: why did the ML model not simply predict the mean of the training y
instead of what it predicted? The mean of the training observations that
the ML model saw during training is called the base value or expected value.
Arguments:
example_index : int
index of example to use
max_display : int
maximum number of features to display
name : str
name of plot
.. _waterfall:
https://shap.readthedocs.io/en/latest/generated/shap.plots.waterfall.html
"""
if self.explainer.__class__.__name__ in ["Deep", "Kernel"]:
shap_vals_as_exp = None
else:
shap_vals_as_exp = self.explainer(self.data)
shap_values = self.shap_values
if isinstance(shap_values, list) and len(shap_values) == 1:
shap_values = shap_values[0]
plt.close('all')
if shap_vals_as_exp is None:
features = self.features
if not self.data_is_2d:
features = self.unrolled_features
# waterfall plot expects first argument as Explaination class
# which must have at least these attributes (values, data, feature_names, base_values)
# https://github.com/slundberg/shap/issues/1420#issuecomment-715190610
if not self.data_is_2d: # if original data is 3d then we flat it into 1d array
values = shap_values[example_index].reshape(-1, )
data = self.data[example_index].reshape(-1, )
else:
values = shap_values[example_index]
data = self.data.iloc[example_index]
exp_value = self.explainer.expected_value
if self.explainer.__class__.__name__ in ["Kernel"]:
pass
else:
exp_value = exp_value[0]
e = Explanation(
values,
base_values=exp_value,
data=data,
feature_names=features
)
shap.plots.waterfall(e, show=False, max_display=max_display)
else:
shap.plots.waterfall(shap_vals_as_exp[example_index], show=False, max_display=max_display)
if self.save:
plt.savefig(os.path.join(self.path, f"{name}_{example_index}"),
dpi=300,
bbox_inches="tight")
if self.show:
plt.show()
return
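# Illustrative usage (a sketch; `model`, `X_test` and `X_train` are assumed to
# be a fitted estimator and its test/train features):
#   explainer = ShapExplainer(model, data=X_test, train_data=X_train,
#                             feature_names=list(X_test.columns))
#   explainer.waterfall_plot_single_example(0)    # one example
#   explainer.waterfall_plot_all_examples()       # or every example in data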
def scatter_plot_single_feature(
self,
feature: int,
name: str = "scatter",
**scatter_kws
):
"""scatter plot for a single feature"""
if self.explainer.__class__.__name__ in ["Kernel"]:
shap_values = Explanation(self.shap_values, data=self.data.values,
feature_names=self.features)
else:
shap_values = self.explainer(self.data)
shap.plots.scatter(shap_values[:, feature], show=False, **scatter_kws)
if self.save:
plt.savefig(os.path.join(self.path, f"{name}_{feature}"), dpi=300, bbox_inches="tight")
if self.show:
plt.show()
return
def scatter_plot_all_features(self, name="scatter_plot", **scatter_kws):
"""draws scatter plot for all features"""
if isinstance(self.data, pd.DataFrame):
features = self.features
else:
features = [i for i in range(self.data.shape[-1])]
for feature in features:
self.scatter_plot_single_feature(feature, name=name, **scatter_kws)
return
def heatmap(self, name: str = 'heatmap', max_display=10):
"""Plots the heatmap_ and saves it
This can be drawn for xgboost/lgbm as well as for randomforest type models
but not for CatBoostRegressor which is todo.
Note
----
The upper line plot on the heat map shows $-fx/max(abs(fx))$ where $fx$ is
the mean SHAP value of all features. The length of $fx$ is equal to length
of data/examples. Thus one point on this line is the mean of SHAP values
of all input features for the given/one example normalized by the maximum
absolute value of $fx$.
.. _heatmap:
https://shap.readthedocs.io/en/latest/example_notebooks/api_examples/plots/heatmap.html
"""
# if heat map is drawn with np.ndarray, it throws errors therefore convert
# it into pandas DataFrame. It is more interpretable and does not hurt.
try:
shap_values = self._get_shap_values_locally()
except (ValueError, AttributeError): # sometimes we are not able to calculate shap values
# the way it is being calculated inside '_get_shap_values_locally'
shap_values = Explanation(
self.shap_values,
data=self.data.values,
feature_names=self.features)
# by default examples are ordered in such a way that examples with similar
# explanations are grouped together.
self._heatmap(shap_values, f"{name}_basic",
max_display=max_display)
# sort by the maximum absolute value of a feature over all the examples
self._heatmap(shap_values, f"{name}_sortby_maxabs",
max_display=max_display,
feature_values=shap_values.abs.max(0))
# sorting by the sum of the SHAP values over all features gives a complementary perspective on the data
self._heatmap(shap_values, f"{name}_sortby_SumOfShap",
max_display=max_display,
instance_order=shap_values.sum(1))
return
def _heatmap(self, shap_values, name, max_display=10, **kwargs):
plt.close('all')
# set show to False because we want to reset xlabel
shap.plots.heatmap(shap_values, show=False, max_display=max_display,
**kwargs)
plt.xlabel("Examples")
if self.save:
plt.savefig(os.path.join(self.path, f"{name}_sortby_SumOfShap"), dpi=300,
bbox_inches="tight")
if self.show:
plt.show()
return
def _get_shap_values_locally(self):
data = self.data
if not isinstance(self.data, pd.DataFrame) and data.ndim == 2:
data = pd.DataFrame(self.data, columns=self.features)
# not using global explainer because, this explainer should data as well
explainer = shap.Explainer(self.model, data)
shap_values = explainer(data)
return shap_values
def beeswarm_plot(
self,
name: str = "beeswarm",
max_display: int = 10,
**kwargs
):
"""
Draws the beeswarm_ plot of shap.
Arguments:
name : str
name of saved file
max_display :
maximum number of features to display
kwargs :
any keyword arguments for shap.beeswarm plot
.. _beeswarm:
https://shap.readthedocs.io/en/latest/example_notebooks/api_examples/plots/beeswarm.html
"""
try:
shap_values = self._get_shap_values_locally()
except (ValueError, AttributeError):
shap_values = Explanation(self.shap_values, data=self.data.values,
feature_names=self.features)
self._beeswarm_plot(shap_values,
name=f"{name}_basic",
max_display=max_display,
**kwargs)
# find features with high impacts
self._beeswarm_plot(shap_values, name=f"{name}_sortby_maxabs",
max_display=max_display,
order=shap_values.abs.max(0), **kwargs)
# plot the absolute value
self._beeswarm_plot(shap_values.abs,
name=f"{name}_abs_shapvalues",
max_display=max_display,
**kwargs)
return
def _beeswarm_plot(self, shap_values, name, max_display=10, **kwargs):
plt.close('all')
shap.plots.beeswarm(shap_values,
show=False,
max_display=max_display,
**kwargs)
if self.save:
plt.savefig(os.path.join(self.path, name), dpi=300, bbox_inches="tight")
if self.show:
plt.show()
return
def decision_plot(
self,
indices=None,
name: str = "decision_",
**decision_kwargs):
"""decision_ plot. For details see this blog.
.. _decision:
https://shap.readthedocs.io/en/latest/example_notebooks/api_examples/plots/decision_plot.html
.. _blog:
https://towardsdatascience.com/introducing-shap-decision-plots-52ed3b4a1cba
"""
shap_values = self.shap_values
legend_location = "best"
legend_labels = None
if indices is not None:
shap_values = shap_values[(indices), :]
if len(shap_values) <= 10:
legend_labels = indices
legend_location = "lower right"
if self.explainer.__class__.__name__ in ["Tree"]:
shap.decision_plot(self.explainer.expected_value,
shap_values,
self.features,
legend_labels=legend_labels,
show=False,
legend_location=legend_location,
**decision_kwargs)
if self.save:
plt.savefig(os.path.join(self.path, name), dpi=300, bbox_inches="tight")
if self.show:
plt.show()
else:
raise NotImplementedError
def plot_shap_values(
self,
interpolation=None,
cmap="coolwarm",
name: str = "shap_values",
):
"""Plots the SHAP values.
Arguments:
name:
name of saved file
interpolation:
interpolation argument to axis.imshow
cmap:
color map
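Example:
a minimal sketch; assumes ``explainer`` is an already initialized ShapExplainer
>>> explainer.plot_shap_values(cmap="coolwarm")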
"""
shap_values = self.shap_values
if isinstance(shap_values, list) and len(shap_values) == 1:
shap_values: np.ndarray = shap_values[0]
def plot_shap_values_single_source(_data, _shap_vals, _features, _name):
if _data.ndim == 3 and _shap_vals.ndim == 3: # input is 3d
# assert _shap_vals.ndim == 3
return imshow_3d(_shap_vals,
_data,
_features,
name=_name,
path=self.path,
show=self.show,
cmap=cmap)
plt.close('all')
fig, axis = plt.subplots()
im = axis.imshow(_shap_vals.T,
aspect='auto',
interpolation=interpolation,
cmap=cmap
)
if _features is not None: # if imshow is successful then don't worry if features are None
axis.set_yticks(np.arange(len(_features)))
axis.set_yticklabels(_features)
axis.set_ylabel("Features")
axis.set_xlabel("Examples")
fig.colorbar(im)
if self.save:
plt.savefig(os.path.join(self.path, _name), dpi=300, bbox_inches="tight")
if self.show:
plt.show()
return
if self.single_source:
plot_shap_values_single_source(self.data, shap_values, self.features, name)
else:
for idx, d in enumerate(self.data):
plot_shap_values_single_source(d,
shap_values[idx],
self.features[idx],
f"{idx}_{name}")
return
def pdp_all_features(
self,
**pdp_kws
):
"""partial dependence plot of all features.
Arguments:
pdp_kws:
any keyword arguments
"""
for feat in self.features:
self.pdp_single_feature(feat, **pdp_kws)
return
def pdp_single_feature(
self,
feature_name: str,
**pdp_kws
):
"""partial depence plot using SHAP package for a single feature."""
shap_values = None
if hasattr(self.shap_values, 'base_values'):
shap_values = self.shap_values
if self.model.__class__.__name__.startswith("XGB"):
self.model.get_booster().feature_names = self.features
fig = shap.partial_dependence_plot(
feature_name,
model=self.model.predict,
data=self.data,
model_expected_value=True,
feature_expected_value=True,
shap_values=shap_values,
feature_names=self.features,
show=False,
**pdp_kws
)
if self.save:
fname = f"pdp_{feature_name}"
plt.savefig(os.path.join(self.path, fname), dpi=300, bbox_inches="tight")
if self.show:
plt.show()
return fig
def imshow_3d(values,
data,
feature_names: list,
path, vmin=None, vmax=None,
name="",
show=False,
cmap=None,
):
num_examples, lookback, input_features = values.shape
assert data.shape == values.shape
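# one figure per input feature: the top panel shows the raw input values and
# the bottom panel shows the corresponding SHAP values across the lookback steps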
for idx, feat in enumerate(feature_names):
plt.close('all')
fig, (ax1, ax2) = plt.subplots(2, sharex='all', figsize=(10, 12))
yticklabels=[f"t-{int(i)}" for i in np.linspace(lookback - 1, 0, lookback)]
axis, im = easy_mpl.imshow(data[:, :, idx].transpose(),
yticklabels=yticklabels,
ax=ax1,
vmin=vmin,
vmax=vmax,
title=feat,
cmap=cmap,
show=False
)
fig.colorbar(im, ax=axis, orientation='vertical', pad=0.2)
axis, im = easy_mpl.imshow(values[:, :, idx].transpose(),
yticklabels=yticklabels,
vmin=vmin, vmax=vmax,
xlabel="Examples",
title=f"SHAP Values",
cmap=cmap,
show=False,
ax=ax2)
fig.colorbar(im, ax=axis, orientation='vertical', pad=0.2)
_name = f'{name}_{feat}_shap_values'
plt.savefig(os.path.join(path, _name), dpi=400, bbox_inches='tight')
if show:
plt.show()
return
def infer_framework(model):
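# best-effort heuristic: PyTorch models expose ``named_parameters`` while
# TensorFlow/Keras models do not, so a failing call is treated as tensorflow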
if hasattr(model, 'config') and 'backend' in model.config:
framework = model.config['backend']
elif type(model) is tuple:
a, _ = model
try:
a.named_parameters()
framework = 'pytorch'
except:
framework = 'tensorflow'
else:
try:
model.named_parameters()
framework = 'pytorch'
except:
framework = 'tensorflow'
return framework
def maybe_to_dataframe(data, features=None) -> pd.DataFrame:
if isinstance(data, np.ndarray) and isinstance(features, list) and data.ndim == 2:
data = pd.DataFrame(data, columns=features)
return data | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/explain/_shap.py | _shap.py |
from typing import Callable, Union, List
from easy_mpl import plot
from ai4water.backend import np, os, plt, pd
from ._explain import ExplainerMixin
# todo, optionally show predicted value as dots on plots
def compute_bounds(xmin, xmax, xv):
"""
from shap
Handles any setting of xmax and xmin.
Note that we handle None, float, or "percentile(float)" formats.
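For example (illustrative only), compute_bounds("percentile(1)", "percentile(99)", xv)
restricts the range to (roughly) the 1st and 99th percentiles of ``xv``.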
"""
if xmin is not None or xmax is not None:
if type(xmin) == str and xmin.startswith("percentile"):
xmin = np.nanpercentile(xv, float(xmin[11:-1]))
if type(xmax) == str and xmax.startswith("percentile"):
xmax = np.nanpercentile(xv, float(xmax[11:-1]))
if xmin is None or xmin == np.nanmin(xv):
xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20
if xmax is None or xmax == np.nanmax(xv):
xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20
return xmin, xmax
class PartialDependencePlot(ExplainerMixin):
"""
Partial dependence plots as introduced by Friedman_ et al., 2001
Example
-------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.postprocessing.explain import PartialDependencePlot
>>> data = busan_beach()
>>> model = Model(model="XGBRegressor")
>>> model.fit(data=data)
# get the data to explain
>>> x, _ = model.training_data()
>>> pdp = PartialDependencePlot(model.predict, x, model.input_features,
>>> num_points=14)
.. _Friedman:
https://doi.org/10.1214/aos/1013203451
"""
def __init__(
self,
model: Callable,
data,
feature_names=None,
num_points: int = 100,
path=None,
save: bool = True,
show: bool = True,
**kwargs
):
"""Initiates the class
Parameters
----------
model : Callable
the trained/calibrated model which must be callable. It must take the
`data` as input and return an array of predicted values. For example,
if you are using a Keras/sklearn model, then you must pass model.predict
data : np.ndarray, pd.DataFrame
The inputs to the `model`. It can be a numpy array or a pandas DataFrame.
feature_names : list, optional
Names of features. Used for labeling.
num_points : int, optional
determines the grid for evaluation of `model`
path : str, optional
path to save the plots. By default the results are saved in current directory
show:
whether to show the plot or not
save:
whether to save the plot or not
**kwargs :
any additional keyword arguments for `model`
"""
self.model = model
self.num_points = num_points
self.xmin = "percentile(0)"
self.xmax = "percentile(100)"
self.kwargs = kwargs
if isinstance(data, pd.DataFrame):
if feature_names is None:
feature_names = data.columns.tolist()
data = data.values
super().__init__(data=data,
features=feature_names,
path=path or os.getcwd(),
show=show,
save=save
)
def nd_interactions(
self,
height: int = 2,
ice: bool = False,
show_dist: bool = False,
show_minima: bool = False,
) -> plt.Figure:
"""Plots 2d interaction plots of all features as done in skopt
Arguments:
height:
height of each subplot in inches
ice:
whether to show the ice lines or not
show_dist:
whether to show the distribution of data as histogram or not
show_minima:
whether to show the function minima or not
Returns:
matplotlib Figure
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.postprocessing.explain import PartialDependencePlot
>>> data = busan_beach()
>>> model = Model(model="XGBRegressor")
>>> model.fit(data=busan_beach())
>>> x, _ = model.training_data()
>>> pdp = PartialDependencePlot(model.predict, x, model.input_features,
... num_points=14)
>>> pdp.nd_interactions(show_dist=True)
"""
n_dims = len(self.features)
fig, ax = plt.subplots(n_dims, n_dims, figsize=(height * n_dims, height * n_dims))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95,
hspace=0.1, wspace=0.1)
for i in range(n_dims):
for j in range(n_dims):
# diagonal
if i == j:
if n_dims > 1:
ax_ = ax[i, i]
else:
ax_ = ax
self._plot_pdp_1dim(*self.calc_pdp_1dim(self.data, self.features[i]),
self.data,
self.features[i],
show_dist=show_dist,
show_minima=show_minima,
ice=ice, show=False, save=False,
ax=ax_)
# resetting the label
# ax_.set_xlabel(self.features[i])
ax_.set_ylabel(self.features[i])
process_axis(ax_,
xlabel=self.features[i],
top_spine=True,
right_spine=True)
# lower triangle
elif i > j:
self.plot_interaction(
features=[self.features[j],self.features[i]],
ax=ax[i, j],
colorbar=False,
save=False,
show=False,
)
elif j > i:
if not ax[i, j].lines: # empty axes
ax[i, j].axis("off")
if j > 0: # not the left most column
ax[i, j].yaxis.set_ticks([])
ax[i, j].yaxis.set_visible(False)
ax[i, j].yaxis.label.set_visible(False)
if i < n_dims-1: # not the bottom most row
ax[i, j].xaxis.set_ticks([])
ax[i, j].xaxis.set_visible(False)
ax[i, j].xaxis.label.set_visible(False)
if self.save:
fname = os.path.join(self.path, f"pdp_interact_nd")
plt.savefig(fname, bbox_inches="tight", dpi=100*n_dims)
if self.show:
plt.show()
return fig
def plot_interaction(
self,
features: list,
lookback: int = None,
ax: plt.Axes = None,
plot_type: str = "2d",
cmap=None,
colorbar: bool = True,
show:bool = True,
save:bool = True,
**kwargs
) -> plt.Axes:
"""Shows interaction between two features
Parameters
----------
features :
a list or tuple of two feature names to use
lookback : optional
only relevant if data is 3d
ax : optional
matplotlib axes on which to draw. If not given, current axes will
be used.
plot_type : optional
either "2d" or "surface"
cmap : optional
color map to use
colorbar : optional
whether to show the colorbar or not
show : bool
save : bool
**kwargs :
any keyword argument for axes.plot_surface or axes.contourf
Returns
-------
matplotlib Axes
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.postprocessing.explain import PartialDependencePlot
>>> data = busan_beach()
>>> model = Model(model="XGBRegressor")
>>> model.fit(data=busan_beach())
>>> x, _ = model.training_data()
>>> pdp = PartialDependencePlot(model.predict, x, model.input_features,
... num_points=14)
... # specifying features whose interaction is to be calculated and plotted.
>>> axis = pdp.plot_interaction(["tide_cm", "wat_temp_c"])
"""
if not self.data_is_2d:
raise NotImplementedError
assert isinstance(features, list) and len(features) == 2
x0, x1, pd_vals = self._interaction(features, self.data, lookback)
kwds = {}
if plot_type == "surface":
kwds['projection'] = '3d'
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, **kwds)
if plot_type == "surface":
_add_surface(ax, x0, x1, pd_vals, cmap, features[0], features[1], **kwargs)
else:
self._plot_interaction(ax, x0, x1, pd_vals,
cmap=cmap,
features=features,
lookback=lookback,
colorbar=colorbar, **kwargs)
if save:
fname = os.path.join(self.path, f"pdp_interact{features[0]}_{features[1]}")
plt.savefig(fname, bbox_inches="tight", dpi=300)
if show:
plt.show()
return ax
def _plot_interaction(
self, ax, x0, x1, pd_vals, cmap,
features,
lookback,
colorbar=True,
**kwargs):
"""adds a 2d interaction plot"""
cntr = ax.contourf(x0, x1, pd_vals, cmap=cmap, **kwargs)
xv0 = self.xv(self.data, features[0], lookback)
xv1 = self.xv(self.data, features[1], lookback)
ax.scatter(xv0, xv1, c='k', s=10, lw=0.)
process_axis(ax, xlabel=features[0], ylabel=features[1])
if colorbar:
cbar = plt.colorbar(cntr, ax=ax)
cbar.set_label(f"E[f(x) | {features[0]}, {features[1]} ]", rotation=90)
return
def _interaction(self, features, data, lookback):
ind0 = self._feature_to_ind(features[0])
ind1 = self._feature_to_ind(features[1])
xs0 = self.grid(self.data, features[0], lookback)
xs1 = self.grid(self.data, features[1], lookback)
features_tmp = data.copy()
x0 = np.zeros((self.num_points, self.num_points))
x1 = np.zeros((self.num_points, self.num_points))
# instead of calling the model in two for loops, prepare the data,
# stack it in 'features_all' and call the model only once.
total_samples = len(data) * self.num_points * self.num_points
features_all = np.full((total_samples, *data.shape[1:]), np.nan)
st, en = 0, len(data)
for i in range(self.num_points):
for j in range(self.num_points):
features_tmp[:, ind0] = xs0[i]
features_tmp[:, ind1] = xs1[j]
x0[i, j] = xs0[i]
x1[i, j] = xs1[j]
features_all[st:en] = features_tmp
st = en
en += len(data)
predictions = self.model(features_all)
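# for every (xs0[i], xs1[j]) grid point, average the predictions over all
# samples to obtain the partial dependence value at that point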
pd_vals = np.zeros((self.num_points, self.num_points))
st, en = 0, len(data)
for i in range(self.num_points):
for j in range(self.num_points):
pd_vals[i, j] = predictions[st:en].mean()
st = en
en += len(data)
return x0, x1, pd_vals
def plot_1d(
self,
feature:Union[str, List[str]],
show_dist: bool = True,
show_dist_as: str = "hist",
ice: bool = True,
feature_expected_value: bool = False,
model_expected_value: bool = False,
show_ci: bool = False,
show_minima: bool = False,
ice_only: bool = False,
ice_color: str = "lightblue",
feature_name:str = None,
pdp_line_kws: dict = None,
ice_lines_kws: dict = None,
hist_kws:dict = None
):
"""partial dependence plot in one dimension
Parameters
----------
feature :
the feature name for which to plot the partial dependence
For one hot encoded categorical features, provide a list
show_dist :
whether to show actual distribution of data or not
show_dist_as :
one of "hist" or "grid"
ice :
whether to show individual conditional expectation (ICE) lines on the plot or not
feature_expected_value :
whether to show the average value of feature on the plot or not
model_expected_value :
whether to show average prediction on plot or not
show_ci :
whether to show confidence interval of pdp or not
show_minima :
whether to indicate the minima or not
ice_only : bool, False
whether to show only ice plots
ice_color :
color for ice lines. It can also be a valid matplotlib
`colormap <https://matplotlib.org/3.5.1/tutorials/colors/colormaps.html>`_
feature_name : str
name of the feature. If not given, then value of ``feature`` is used.
pdp_line_kws : dict
any keyword argument for axes.plot when plotting the pdp line
ice_lines_kws : dict
any keyword argument for axes.plot when plotting ice lines
hist_kws :
any keyword argument for axes.hist when plotting the histogram
Examples
---------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> model = Model(model="XGBRegressor")
>>> data = busan_beach()
>>> model.fit(data=data)
>>> x, _ = model.training_data(data=data)
>>> pdp = PartialDependencePlot(model.predict, x, model.input_features,
... num_points=14)
>>> pdp.plot_1d("tide_cm")
with categorical features
>>> from ai4water.datasets import mg_photodegradation
>>> data, cat_enc, an_enc = mg_photodegradation(encoding="ohe")
>>> model = Model(model="XGBRegressor")
>>> model.fit(data=data)
>>> x, _ = model.training_data(data=data)
>>> pdp = PartialDependencePlot(model.predict, x, model.input_features,
... num_points=14)
>>> feature = [f for f in model.input_features if f.startswith('Catalyst_type')]
>>> pdp.plot_1d(feature)
>>> pdp.plot_1d(feature, show_dist_as="grid")
>>> pdp.plot_1d(feature, show_dist=False)
>>> pdp.plot_1d(feature, show_dist=False, ice=False)
>>> pdp.plot_1d(feature, show_dist=False, ice=False, model_expected_value=True)
>>> pdp.plot_1d(feature, show_dist=False, ice=False, feature_expected_value=True)
"""
if isinstance(feature, tuple):
raise NotImplementedError
else:
if self.single_source:
if self.data_is_2d:
pdp_vals, ice_vals = self.calc_pdp_1dim(self.data, feature)
ax = self._plot_pdp_1dim(
pdp_vals,
ice_vals,
self.data, feature,
show_dist=show_dist,
show_dist_as=show_dist_as,
ice=ice,
feature_expected_value=feature_expected_value,
show_ci=show_ci, show_minima=show_minima,
model_expected_value=model_expected_value,
show=self.show,
save=self.save,
ice_only=ice_only,
ice_color=ice_color,
feature_name=feature_name,
ice_lines_kws=ice_lines_kws,
pdp_line_kws=pdp_line_kws,
hist_kws=hist_kws
)
elif self.data_is_3d:
for lb in range(self.data.shape[1]):
pdp_vals, ice_vals = self.calc_pdp_1dim(self.data, feature, lb)
ax = self._plot_pdp_1dim(
pdp_vals,
ice_vals,
data=self.data,
feature=feature,
lookback=lb,
show_ci=show_ci,
show_minima=show_minima,
show_dist=show_dist,
show_dist_as=show_dist_as,
ice=ice,
feature_expected_value=feature_expected_value,
model_expected_value=model_expected_value,
show=self.show,
save=self.save,
ice_only=ice_only,
ice_color=ice_color,
ice_lines_kws=ice_lines_kws,
pdp_line_kws=pdp_line_kws,
hist_kws=hist_kws)
else:
raise ValueError(f"invalid data shape {self.data.shape}")
else:
for data in self.data:
if self.data_is_2d:
ax = self.calc_pdp_1dim(data, feature)
else:
for lb in []:
ax = self.calc_pdp_1dim(data, feature, lb)
return ax
def xv(self, data, feature, lookback=None):
ind = self._feature_to_ind(feature)
if data.ndim == 3:
xv = data[:, lookback, ind]
else:
xv = data[:, ind]
return xv
def grid(self, data, feature, lookback=None):
"""generates the grid for evaluation of model"""
if isinstance(feature, list):
# one hot encoded feature
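# each grid point corresponds to one category i.e. a one-hot row repeated for all samples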
self.num_points = len(feature)
xs = pd.get_dummies(feature)
return [repeat(xs.iloc[i].values, len(data)) for i in range(len(xs))]
xmin, xmax = compute_bounds(self.xmin,
self.xmax,
self.xv(data, feature, lookback))
return np.linspace(xmin, xmax, self.num_points)
def calc_pdp_1dim(self, data, feature, lookback=None):
"""calculates partial dependence for 1 dimension data"""
ind = self._feature_to_ind(feature)
xs = self.grid(data, feature, lookback)
data_temp = data.copy()
# instead of calling the model for each num_point, prepare the data
# stack it in 'data_all' and call the model only once
total_samples = len(data) * self.num_points
data_all = np.full((total_samples, *data.shape[1:]), np.nan)
pd_vals = np.full(self.num_points, np.nan)
ice_vals = np.full((self.num_points, data.shape[0]), np.nan)
st, en = 0, len(data)
for i in range(self.num_points):
if data.ndim == 3:
data_temp[:, lookback, ind] = xs[i]
else:
data_temp[:, ind] = xs[i]
data_all[st:en] = data_temp
st = en
en += len(data)
predictions = self.model(data_all, **self.kwargs)
st, en = 0, len(data)
for i in range(self.num_points):
pred = predictions[st:en]
pd_vals[i] = pred.mean()
ice_vals[i, :] = pred.reshape(-1, )
st = en
en += len(data)
return pd_vals, ice_vals
def _feature_to_ind(
self,
feature:Union[str, List[str]]
) -> int:
ind = feature
if isinstance(feature, str):
if self.single_source:
ind = self.features.index(feature)
else:
raise NotImplementedError
elif isinstance(feature, list):
ind = [self.features.index(i) for i in feature]
elif not isinstance(feature, int):
raise ValueError
return ind
def _plot_pdp_1dim(
self,
pd_vals,
ice_vals,
data,
feature,
lookback=None,
show_dist=True, show_dist_as="hist",
ice=True, show_ci=False,
show_minima=False,
feature_expected_value=False,
model_expected_value=False,
show=True, save=False, ax=None,
ice_color="lightblue",
ice_only:bool = False,
feature_name:str = None,
pdp_line_kws:dict = None,
ice_lines_kws:dict = None,
hist_kws:dict = None,
):
xmin, xmax = compute_bounds(self.xmin,
self.xmax,
self.xv(data, feature, lookback))
if ax is None:
fig = plt.figure()
ax = fig.add_axes((0.1, 0.3, 0.8, 0.6))
else:
# needed below when the data distribution is drawn as a grid on a user-supplied axes
fig = ax.get_figure()
if isinstance(feature, list):
xs = np.arange(len(feature))
if feature_name is None:
feature_name = f"Feature"
else:
if feature_name is None:
feature_name = feature
xs = self.grid(data, feature, lookback)
ylabel = "E[f(x) | " + feature_name + "]"
if ice:
n = ice_vals.shape[1]
if ice_color in plt.colormaps():
colors = plt.get_cmap(ice_color)(np.linspace(0, 0.8, n))
else:
colors = [ice_color for _ in range(n)]
_ice_lines_kws = dict(linewidth=min(1, 50 / n), alpha=1)
if ice_lines_kws is not None:
_ice_lines_kws.update(ice_lines_kws)
for _ice in range(n):
ax.plot(xs, ice_vals[:, _ice], color=colors[_ice],
**_ice_lines_kws)
ylabel = "f(x) | " + feature_name
if show_ci:
std = np.std(ice_vals, axis=1)
upper = pd_vals + std
lower = pd_vals - std
color = '#66C2D7'
if ice_color != "lightblue":
if ice_color not in plt.colormaps():
color = ice_color
ax.fill_between(xs, upper, lower, alpha=0.14, color=color)
# the line plot
_pdp_line_kws = dict(color='blue', linewidth=2, alpha=1)
if not ice_only:
if pdp_line_kws is not None:
_pdp_line_kws.update(pdp_line_kws)
plot(xs, pd_vals, show=False, ax=ax, **_pdp_line_kws)
title = None
if lookback is not None:
title = f"lookback: {lookback}"
process_axis(ax,
ylabel=ylabel,
ylabel_kws=dict(fontsize=20),
right_spine=False,
top_spine=False,
tick_params=dict(labelsize=11),
xlabel=feature_name,
xlabel_kws=dict(fontsize=20),
title=title)
if isinstance(feature, list):
ax.set_xticks(xs)
ax.set_xticklabels(feature, rotation=90)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax2 = ax.twinx()
if show_dist:
_hist_kws = dict(density=False, facecolor='black', alpha=0.1)
if hist_kws is not None:
_hist_kws.update(hist_kws)
xv = self.xv(data, feature, lookback)
if show_dist_as == "hist":
ax2.hist(xv, 50, range=(xmin, xmax), **_hist_kws)
else:
_add_dist_as_grid(fig, xv, other_axes=ax, xlabel=feature,
xlabel_kws=dict(fontsize=20))
process_axis(ax2,
right_spine=False,
top_spine=False,
left_spine=False,
bottom_spine=False,
ylim=(0, data.shape[0]))
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.yaxis.set_ticks([])
if feature_expected_value:
self._add_feature_exp_val(ax2, ax, xmin, xmax, data, feature,
lookback=lookback,
feature_name=feature_name)
if model_expected_value:
self._add_model_exp_val(ax2, ax, data)
if show_minima:
minima = self.model(data, **self.kwargs).min()
ax.axvline(minima, linestyle="--", color="r", lw=1)
if save:
lookback = lookback or ''
fname = os.path.join(self.path, f"pdp_{feature_name}_{lookback}")
plt.savefig(fname, bbox_inches="tight", dpi=400)
if show:
plt.show()
return ax
def _add_model_exp_val(self, ax, original_axis, data):
"""adds model expected value on a duplicate axis of ax"""
model_expected_val = self.model(data, **self.kwargs).mean()
ax2 = ax.twinx()
ymin, ymax = original_axis.get_ylim()
process_axis(ax2, ylim=(ymin, ymax),
yticks=[model_expected_val],
yticklabels=["E[f(x)]"],
right_spine=False,
top_spine=False,
tick_params=dict(length=0, labelsize=11)
)
original_axis.axhline(model_expected_val, color="#999999", zorder=-1,
linestyle="--", linewidth=1)
return
def _add_feature_exp_val(self, ax, original_axis, xmin, xmax, data,
feature,
feature_name=None,
lookback=None):
xv = self.xv(data=data, feature=feature, lookback=lookback)
mval = xv.mean()
ax3 = ax.twiny()
process_axis(ax3,
xlim=(xmin, xmax),
xticks=[mval], xticklabels=["E[" + feature_name + "]"],
tick_params={'length': 0, 'labelsize': 11}, top_spine=False,
right_spine=False)
original_axis.axvline(mval, color="#999999", zorder=-1, linestyle="--",
linewidth=1)
return
def process_axis(
ax: plt.Axes,
title=None, title_kws=None,
xlabel=None, xlabel_kws=None,
ylabel=None, ylabel_kws=None,
xticks=None, xticklabels=None,
yticks=None, yticklabels=None,
tick_params=None,
top_spine=None, right_spine=None, bottom_spine=None, left_spine=None,
xlim=None, ylim=None
):
"""processes a matplotlib axes"""
if title:
title_kws = title_kws or {}
ax.set_title(title, **title_kws)
if ylabel:
ylabel_kws = ylabel_kws or {}
ax.set_ylabel(ylabel, **ylabel_kws)
if xlabel:
xlabel_kws = xlabel_kws or {}
ax.set_xlabel(xlabel, **xlabel_kws)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if xticks:
ax.set_xticks(xticks)
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
if yticks:
ax.set_yticks(yticks)
if yticklabels is not None:
ax.set_yticklabels(yticklabels)
if tick_params:
ax.tick_params(**tick_params)
if top_spine is False:
ax.spines['top'].set_visible(False)
elif top_spine is True:
ax.spines['top'].set_visible(True)
if right_spine is False:
ax.spines['right'].set_visible(False)
elif right_spine is True:
ax.spines['right'].set_visible(True)
if bottom_spine is False:
ax.spines['bottom'].set_visible(False)
if left_spine is False:
ax.spines['left'].set_visible(False)
return
def _add_dist_as_grid(fig: plt.Figure, hist_data, other_axes: plt.Axes,
xlabel=None, xlabel_kws=None, **plot_params):
"""Data point distribution plot for numeric feature"""
ax = fig.add_axes((0.1, 0.1, 0.8, 0.14), sharex=other_axes)
process_axis(ax, top_spine=False, xlabel=xlabel, xlabel_kws=xlabel_kws,
bottom_spine=False,
right_spine=False, left_spine=False)
ax.yaxis.set_visible(False) # hide the yaxis
ax.xaxis.set_visible(False) # hide the x-axis
color = plot_params.get('pdp_color', '#1A4E5D')
ax.plot(hist_data, [1] * len(hist_data), '|', color=color, markersize=20)
return
def _add_surface(ax, x0, x1, pd_vals, cmap, feature0, feature2, **kwargs):
ax.plot_surface(x0, x1, pd_vals, cmap=cmap, **kwargs)
ax.set_xlabel(feature0, fontsize=11)
ax.set_ylabel(feature2, fontsize=11)
ax.set_zlabel(f"E[f(x) | {feature0} {feature2} ]", fontsize=11)
return
def repeat(array, n:int):
# (len(array),) -> (len(array), n)
return np.tile(array, n).reshape(-1, len(array)) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/explain/_partial_dependence.py | _partial_dependence.py |
from typing import Union
from ai4water.backend import np, pd, plt, os, lime
if lime is not None:
from lime import lime_tabular
from ._explain import ExplainerMixin
class LimeExplainer(ExplainerMixin):
"""
Wrapper around LIME module.
Example:
>>> from ai4water import Model
>>> from ai4water.postprocessing import LimeExplainer
>>> from ai4water.datasets import busan_beach
>>> model = Model(model="GradientBoostingRegressor")
>>> model.fit(data=busan_beach())
>>> lime_exp = LimeExplainer(model=model,
... train_data=model.training_data()[0],
... data=model.test_data()[0],
... mode="regression")
>>> lime_exp.explain_example(0)
Attributes:
explaination_objects : holds the explanation objects for each individual example/instance
"""
def __init__(
self,
model,
data,
train_data,
mode: str,
explainer=None,
path=None,
feature_names: list = None,
verbosity: Union[int, bool] = True,
save: bool = True,
show: bool = True,
**kwargs
):
"""
Arguments:
model :
the model to explain. The model must have `predict` method.
data :
the data to explain. This would typically be test data but it
can be any data.
train_data :
the data on which the model was trained.
mode :
either of `regression` or `classification`
explainer :
The explainer to use. By default, LimeTabularExplainer is used.
path :
path where to save all the plots. By default, plots will be saved in
current working directory.
feature_names :
name/names of features.
verbosity :
whether to print information or not.
show:
whether to show the plot or not
save:
whether to save the plot or not
"""
self.model = model
self.train_data = to_np(train_data)
super(LimeExplainer, self).__init__(path=path or os.getcwd(),
data=to_np(data),
save=save,
show=show,
features=feature_names)
self.mode = mode
self.verbosity = verbosity
self.explainer = self._get_explainer(explainer, **kwargs)
self.explaination_objects = {}
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, x):
if x is not None:
assert x in ["regression", "classification"], f"mode must be either regression or classification not {x}"
self._mode = x
def _get_explainer(self, proposed_explainer=None, **kwargs):
if proposed_explainer is None and self.data.ndim <= 2:
lime_explainer = lime.lime_tabular.LimeTabularExplainer(
self.train_data,
feature_names=self.features,
# class_names=['price'],
# categorical_features=categorical_features,
verbose=self.verbosity,
mode=self.mode,
**kwargs
)
elif proposed_explainer in lime.lime_tabular.__dict__.keys():
lime_explainer = getattr(lime.lime_tabular, proposed_explainer)(
self.train_data,
feature_names=self.features,
mode=self.mode,
verbose=self.verbosity,
**kwargs
)
elif self.data.ndim == 3:
lime_explainer = lime.lime_tabular.RecurrentTabularExplainer(
self.train_data,
mode=self.mode,
feature_names=self.features,
verbose=self.verbosity,
**kwargs
)
elif proposed_explainer is not None:
lime_explainer = getattr(lime, proposed_explainer)(
self.train_data,
features=self.features,
mode=self.mode,
**kwargs
)
else:
raise ValueError(f"Can not infer explainer. Please specify explainer to use.")
return lime_explainer
def __call__(self, *args, **kwargs):
self.explain_all_examples(*args, **kwargs)
return
def explain_all_examples(self,
plot_type="pyplot",
name="lime_explaination",
num_features=None,
**kwargs
):
"""
Draws and saves plot for all examples of test_data.
Arguments:
plot_type :
name :
num_features :
kwargs : any keyword argument for `explain_instance`
An example here means an instance/sample/data point.
"""
for i in range(len(self.data)):
self.explain_example(i, plot_type=plot_type, name=f"{name}_{i}",
num_features=num_features, **kwargs)
return
def explain_example(
self,
index: int,
plot_type: str = "pyplot",
name: str = "lime_explaination",
num_features: int = None,
colors=None,
annotate=False,
**kwargs
)->plt.Figure:
"""
Draws and saves plot for a single example of test_data.
Arguments:
index : index of test_data
plot_type : either pyplot or html
name : name with which to save the file
num_features :
colors :
annotate : whether to annotate figure or not
kwargs : any keyword argument for `explain_instance`
Returns:
matplotlib figure if plot_type="pyplot" and show is False.
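Example:
a minimal sketch; assumes ``lime_exp`` is an initialized LimeExplainer (see the class docstring)
>>> fig = lime_exp.explain_example(0, plot_type="pyplot")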
"""
assert plot_type in ("pyplot", "html")
exp = self.explainer.explain_instance(self.data[index],
self.model.predict,
num_features=num_features or len(self.features),
**kwargs
)
self.explaination_objects[index] = exp
fig = None
if plot_type == "pyplot":
plt.close()
fig = as_pyplot_figure(exp, colors=colors, example_index=index, annotate=annotate)
if self.save:
plt.savefig(os.path.join(self.path, f"{name}_{index}"), bbox_inches="tight")
if self.show:
plt.show()
else:
exp.save_to_file(os.path.join(self.path, f"{name}_{index}"))
return fig
def to_np(x) -> np.ndarray:
if isinstance(x, pd.DataFrame):
x = x.values
else:
assert isinstance(x, np.ndarray)
return x
def as_pyplot_figure(
inst_explainer,
label=1,
example_index=None,
colors: [str, tuple, list] = None,
annotate=False,
**kwargs):
"""Returns the explanation as a pyplot figure.
Will throw an error if you don't have matplotlib installed
Args:
inst_explainer : instance explainer
label: desired label. If you ask for a label for which an
explanation wasn't computed, will throw an exception.
Will be ignored for regression explanations.
colors : if tuple it must be names of two colors for +ve and -ve
example_index :
annotate : whether to annotate the figure or not?
kwargs: keyword arguments, passed to domain_mapper
Returns:
pyplot figure (barchart).
"""
textstr = f"""Prediction: {round(inst_explainer.predicted_value, 2)}
Local prediction: {round(inst_explainer.local_pred.item(), 2)}"""
if colors is None:
colors = ([0.9375, 0.01171875, 0.33203125], [0.23828125, 0.53515625, 0.92578125])
elif isinstance(colors, str):
colors = (colors, colors)
exp = inst_explainer.as_list(label=label, **kwargs)
fig = plt.figure()
vals = [x[1] for x in exp]
names = [x[0] for x in exp]
vals.reverse()
names.reverse()
if isinstance(colors, tuple):
colors = [colors[0] if x > 0 else colors[1] for x in vals]
pos = np.arange(len(exp)) + .5
h = plt.barh(pos, vals, align='center', color=colors)
plt.yticks(pos, names)
if inst_explainer.mode == "classification":
title = 'Local explanation for class %s' % inst_explainer.class_names[label]
else:
title = f'Local explanation for example {example_index}'
plt.title(title)
plt.grid(linestyle='--', alpha=0.5)
if annotate:
# https://stackoverflow.com/a/59109053/5982232
plt.legend(h, [textstr], loc="best",
fancybox=True, framealpha=0.7,
handlelength=0, handletextpad=0)
return fig | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/explain/_lime.py | _lime.py |
from ai4water.backend import sklearn_models
def convert_ai4water_model(old_model, framework=None, explainer=None):
"""convert ai4water's Model class to sklearn/xgboost..etc type model classes
"""
new_model = old_model
model_name = old_model.__class__.__name__
if old_model.__class__.__name__ == "Model" and "ai4water" in str(type(old_model)):
# this is ai4water model class
if old_model.category == "ML":
model_name = list(old_model.config['model'].keys())[0]
new_model, _explainer = to_native(old_model, model_name)
explainer = explainer or _explainer
framework = "ML"
else:
framework = "DL"
explainer = explainer or "DeepExplainer"
if 'functional' in str(type(old_model)):
new_model = functional_to_keras(old_model)
return new_model, framework, explainer, model_name
def to_native(model, model_name:str):
# because transformations are part of Model in ai4water, and TreeExplainer
# is based upon the tree structure, it will not consider transformation as part of the Model
if model.config['x_transformation'] or model.config['y_transformation']:
explainer = "KernelExplainer"
else:
explainer = "TreeExplainer"
if model_name.startswith("XGB"):
import xgboost
BaseModel = xgboost.__dict__[model_name]
elif model_name.startswith("LGB"):
import lightgbm
BaseModel = lightgbm.__dict__[model_name]
elif model_name.startswith("Cat"):
import catboost
BaseModel = catboost.__dict__[model_name]
elif model_name in sklearn_models:
BaseModel = sklearn_models[model_name]
explainer = "KernelExplainer"
else:
raise ValueError
class DummyModel(BaseModel):
"""First priority is to get attribute from ai4water's Model and then from
the underlying library's model class."""
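# __getattribute__ routes every lookup to the ai4water Model first (e.g. path, config);
# __getattr__ is the fallback which fetches the attribute from the wrapped
# library model i.e. model._model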
def __getattribute__(self, item):
return getattr(model, item)
def __getattr__(self, item):
return getattr(model._model, item)
return DummyModel(), explainer
def get_features(features, features_to_explain):
if features_to_explain is not None:
if isinstance(features_to_explain, str):
features_to_explain = [features_to_explain]
else:
features_to_explain = features
assert isinstance(features_to_explain, list)
for f in features_to_explain:
assert f in features
return features_to_explain
def functional_to_keras(old_model):
"""converts the model of functional api to keras model"""
assert old_model.config['x_transformation'] is None
assert old_model.config['y_transformation'] is None
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Flatten
# keras model from functional api
old_model = old_model._model
old_m_outputs = old_model.outputs
if isinstance(old_m_outputs, list):
assert len(old_m_outputs) == 1
old_m_outputs = old_m_outputs[0]
if len(old_m_outputs.shape) > 2: # (None, ?, ?)
new_outputs = Flatten()(old_m_outputs) # (None, ?)
assert new_outputs.shape.as_list()[-1] == 1 # (None, 1)
new_model = Model(old_model.inputs, new_outputs)
else: # (None, ?)
assert old_m_outputs.shape.as_list()[-1] == 1 # (None, 1)
new_model = old_model
return new_model | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/explain/utils.py | utils.py |
from typing import Union
from ...preprocessing import DataSet
from ._shap import ShapExplainer, shap
from ._lime import LimeExplainer, lime
from ..utils import choose_examples
from .utils import convert_ai4water_model, get_features
from ai4water.backend import os
def explain_model(
model,
data_to_explain=None,
train_data=None,
total_data=None,
features_to_explain: Union[str, list] = None,
examples_to_explain: Union[int, float, list] = 0,
explainer=None,
layer: Union[str, int] = None,
method: str = "both"
):
"""
Explains the ai4water's Model class.
Arguments:
model : the AI4Water's model to explain
features_to_explain : the input features to explain. It must be a string
or a list of strings where a string is a feature name.
examples_to_explain : the examples to explain. If integer, it will be
the number/index of example to explain. If float, it will be fraction
of values to explain. If list/array, it will be index of examples
to explain. The examples are choosen which have highest variance
in prediction.
explainer : the explainer to use. If None, it will be inferred based
upon the model type.
layer : layer to explain. Only relevant if the model consits of layers
of neural networks. If integer, it will be the number of layer
to explain. If string, it will be name of layer of to explain.
method : either 'both', 'shap' or 'lime'. If both, then the model will
be explained using both lime and shap methods.
Returns:
if `method`==both, it will return a tuple of LimeExplainer and ShapExplainer
otherwise it will return the instance of either LimeExplainer or ShapExplainer.
Example:
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.postprocessing.explain import explain_model
>>> model = Model(model="RandomForestRegressor")
>>> model.fit(data=busan_beach())
>>> explain_model(model, total_data=busan_beach())
"""
data = {'data_to_explain': data_to_explain,
'train_data': train_data,
'total_data': total_data}
if method == 'both':
exp1 = _explain_with_lime(model=model, examples_to_explain=examples_to_explain, **data)
exp2 = _explain_with_shap(model,
features_to_explain=features_to_explain,
examples_to_explain=examples_to_explain,
explainer=explainer, layer=layer, **data)
explainer = (exp1, exp2)
elif method == 'shap' and shap:
explainer = _explain_with_shap(model,
features_to_explain=features_to_explain,
examples_to_explain=examples_to_explain,
explainer=explainer, layer=layer, **data)
elif method == 'lime' and lime:
explainer = _explain_with_lime(model=model, examples_to_explain=examples_to_explain, **data)
else:
ValueError(f"unrecognized method {method}")
return explainer
def explain_model_with_lime(
model,
data_to_explain=None,
train_data=None,
total_data=None,
examples_to_explain: Union[int, float, list] = 0,
) -> "LimeExplainer":
"""Explains the model with LimeExplainer
Parameters
----------
data_to_explain :
the data to explain
train_data :
the data used for training.
total_data :
total data from which training and test data will be extracted.
This is only required if data_to_explain/train data is not given.
model :
the AI4Water's model to explain
examples_to_explain :
the examples to explain
Returns
-------
an instance of [LimeExplainer][ai4water.postprocessing.explain.LimeExplainer]
Example
-------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.postprocessing.explain import explain_model_with_lime
>>> model = Model(model="RandomForestRegressor")
>>> model.fit(data=busan_beach())
>>> explain_model_with_lime(model, total_data=busan_beach())
"""
if total_data is None:
train_x = train_data
test_x = data_to_explain
test_y = None
else:
assert total_data is not None
train_x, _ = model.training_data(data=total_data)
test_x, test_y = model.test_data(data=total_data)
features = model.input_features
lime_exp_path = maybe_make_path(os.path.join(model.path, "explainability", "lime"))
test_x, index = choose_examples(test_x, examples_to_explain, test_y)
mode = model.mode
verbosity = model.verbosity
if model.lookback > 1:
explainer = "RecurrentTabularExplainer"
else:
explainer = "LimeTabularExplainer"
model, _, _, _ = convert_ai4water_model(model)
if mode == "classification":
return
explainer = LimeExplainer(model,
data=test_x,
train_data=train_x,
path=lime_exp_path,
feature_names=features,
explainer=explainer,
mode=mode,
verbosity=verbosity,
show=False
)
for i in range(explainer.data.shape[0]):
explainer.explain_example(i, name=f"lime_exp_for_{index[i]}")
return explainer
def explain_model_with_shap(
model,
data_to_explain=None,
train_data=None,
total_data=None,
features_to_explain: Union[str, list] = None,
examples_to_explain: Union[int, float, list] = 0,
explainer=None,
layer: Union[str, int] = None,
plot_name="summary",
) -> "ShapExplainer":
"""Expalins the model which is built by AI4Water's Model class using SHAP.
Parameters
----------
model :
the model to explain.
data_to_explain :
the data to explain. If given, then ``train_data`` must be given as well.
If not given then ``total_data`` must be given.
train_data :
the data on which model was trained. If not given, then ``total_data`` must
be given.
total_data :
raw unpreprocessed data from which train and test data will be extracted.
The explanation will be done on test data. This is only required if
data_to_explain and train_data are not given.
features_to_explain :
the features to explain.
examples_to_explain :
the examples to explain. If integer, it will be
the number of examples to explain. If float, it will be fraction
of values to explain. If list/array, it will be index of examples
to explain. The examples are choosen which have highest variance
in prediction.
explainer :
the explainer to use
layer :
layer to explain.
plot_name :
name of plot to draw
Returns
-------
an instance of ShapExplainer
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.postprocessing.explain import explain_model_with_shap
>>> model = Model(model="RandomForestRegressor")
>>> model.fit(data=busan_beach())
>>> explain_model_with_shap(model, total_data=busan_beach())
"""
assert hasattr(model, 'path')
if data_to_explain is None:
assert total_data is not None
train_x, _ = model.training_data(data=total_data)
data_to_explain, test_y = model.test_data(data=total_data)
else:
assert train_data is not None
assert data_to_explain is not None
train_x = train_data
data_to_explain = data_to_explain
test_y = None
features = model.input_features
shap_exp_path = maybe_make_path(os.path.join(model.path, "explainability", "shap"))
if not isinstance(model.dh_, DataSet):
raise NotImplementedError
features_to_explain = get_features(features, features_to_explain)
model, framework, _explainer, _ = convert_ai4water_model(model)
if framework == "DL":
layer = layer or 2
explainer = explainer or _explainer
if examples_to_explain is None:
examples_to_explain = 0
data_to_explain, index = choose_examples(data_to_explain, examples_to_explain, test_y)
explainer = ShapExplainer(model=model,
data=data_to_explain,
train_data=train_x,
explainer=explainer,
path=shap_exp_path,
framework=framework,
feature_names=features_to_explain,
layer=layer,
show=False
)
if plot_name == "all":
for i in range(explainer.data.shape[0]):
explainer.force_plot_single_example(i, f"force_plot_{index[i]}")
explainer.summary_plot()
explainer.plot_shap_values()
else:
explainer.plot_shap_values()
return explainer
def _explain_with_lime(*args, **kwargs):
explainer = None
if lime:
explainer = explain_model_with_lime(*args, **kwargs)
return explainer
def _explain_with_shap(*args, **kwargs):
explainer = None
if shap:
explainer = explain_model_with_shap(*args, **kwargs)
return explainer
def maybe_make_path(path):
if not os.path.exists(path):
os.makedirs(path)
return path | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/explain/explainer_helper_funcs.py | explainer_helper_funcs.py |
import warnings
from ai4water.backend import xgboost, tf, np, pd, mpl, plt, os
from ai4water.backend import easy_mpl as ep
from ai4water.utils.visualizations import Plot
from ai4water.utils.utils import plot_activations_along_inputs
class Interpret(Plot):
"""Interprets the ai4water Model.
The ``Interpret`` class is different than the methods in ``explain`` module.
The ``Interpret`` class explains the behaviour of the model by using constituents
of the model itself for example attention weights or feature importance.
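Example
-------
a minimal sketch; assumes ``model`` is a trained ai4water Model
>>> interpreter = Interpret(model)
>>> interpreter.plot_feature_importance()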
"""
def __init__(self, model, save:bool = False, show:bool = True):
"""
Arguments
---------
model :
an instance of ai4water's Model
save : bool
show : bool
"""
self.model = model
self.save = save
self.show = show
super().__init__(model.path)
if self.model.category.upper() == "DL":
if hasattr(model, 'interpret') and not model.__class__.__name__ == "Model":
model.interpret()
elif self.model.category == 'ML':
use_xgb = False
if self.model._model.__class__.__name__ == "XGBRegressor":
use_xgb = True
self.plot_feature_importance(use_xgb = use_xgb)
@property
def model(self):
return self._model
@model.setter
def model(self, x):
self._model = x
def feature_importance(self):
if self.model.category.upper() == "ML":
estimator = self.model._model
if not is_fitted(estimator):
print(f"the model {estimator} is not fitted yet so not feature importance")
return
model_name = list(self.model.config['model'].keys())[0]
if model_name.upper() in ["SVC", "SVR"]:
if estimator.kernel == "linear":
# https://stackoverflow.com/questions/41592661/determining-the-most-contributing-features-for-svm-classifier-in-sklearn
return estimator.coef_
elif hasattr(estimator, "feature_importances_"):
return estimator.feature_importances_
def f_importances_svm(self, coef, names):
plt.close('all')
mpl.rcParams.update(mpl.rcParamsDefault)
classes = coef.shape[0]
features = coef.shape[1]
_, axis = plt.subplots(classes, sharex='all')
axis = axis if hasattr(axis, "__len__") else [axis]
for idx, ax in enumerate(axis):
# colors = ['red' if c < 0 else 'blue' for c in self._model.coef_[idx]]
ax.bar(range(features), self._model.coef_[idx], 0.4)
plt.xticks(ticks=range(features), labels=self.model.input_features, rotation=90, fontsize=12)
self.save_or_show(save=self.save, fname=f"{list(self.model.config['model'].keys())[0]}_feature_importance")
return
def plot_feature_importance(
self,
importance=None,
use_xgb=False,
max_num_features=20,
figsize=None,
**kwargs):
"""
plots feature importance when the model is tree based.
"""
figsize = figsize or (8, 8)
if importance is None:
importance = self.feature_importance()
if self.model.category == "ML":
model_name = list(self.model.config['model'].keys())[0]
if model_name.upper() in ["SVC", "SVR"]:
if self.model._model.kernel == "linear":
return self.f_importances_svm(importance, self.model.input_features)
else:
warnings.warn(f"for {self.model._model.kernel} kernels of {model_name}, feature "
f"importance can not be plotted.")
return
if isinstance(importance, np.ndarray):
assert importance.ndim <= 2
if importance is None:
return
all_cols = self.model.input_features + self.model.output_features
if self.model.teacher_forcing:
all_cols = self.model.input_features
imp_sort = np.sort(importance)[::-1]
all_cols = np.array(all_cols)
all_cols = all_cols[np.argsort(importance)[::-1]]
# save the whole importance before truncating it
fname = os.path.join(self.model.path, 'feature_importance.csv')
pd.DataFrame(imp_sort, index=all_cols,
columns=['importance_sorted']).to_csv(fname)
imp = np.concatenate([imp_sort[0:max_num_features], [imp_sort[max_num_features:].sum()]])
all_cols = list(all_cols[0:max_num_features]) + [f'rest_{len(all_cols) - max_num_features}']
if use_xgb:
self._feature_importance_xgb(max_num_features=max_num_features)
else:
plt.close('all')
_, axis = plt.subplots(figsize=figsize)
ep.bar_chart(labels=all_cols,
values=imp,
ax=axis,
ax_kws={'title':"Feature importance",
'xlabel_kws': {'fontsize': 12}},
show=False)
self.save_or_show(save=self.save, show=self.show,
fname="feature_importance.png")
return
def _feature_importance_xgb(self, max_num_features=None, **kwargs):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.plot_importance
if xgboost is None:
warnings.warn("install xgboost to plot plot_importance using xgboost", UserWarning)
else:
booster = self.model._model.get_booster()
booster.feature_names = self.model.input_features
plt.close('all')
# global feature importance with xgboost comes with different types
xgboost.plot_importance(booster, max_num_features=max_num_features)
self.save_or_show(save=self.save, show=self.show,
fname="feature_importance_weight.png")
plt.close('all')
xgboost.plot_importance(booster, importance_type="cover",
max_num_features=max_num_features, **kwargs)
self.save_or_show(save=self.save, show=self.show,
fname="feature_importance_type_cover.png")
plt.close('all')
xgboost.plot_importance(booster, importance_type="gain",
max_num_features=max_num_features, **kwargs)
self.save_or_show(save=self.save, show=self.show,
fname="feature_importance_type_gain.png")
return
def compare_xgb_f_imp(
self,
calculation_method="all",
rescale=True,
figsize:tuple=None,
backend:str = 'matplotlib',
**kwargs
):
"""compare various feature importance calculations methods that are built
in in XGBoost"""
inp_features = self.model.input_features
assert isinstance(inp_features, list)
booster = self.model._model.get_booster()
booster.feature_names = self.model.input_features
_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover']
importance_types = _importance_types.copy()
if calculation_method != "all":
if isinstance(calculation_method, str):
calculation_method = [calculation_method]
assert isinstance(calculation_method, list)
# remove those which are not desired
for imp in _importance_types:
if imp not in calculation_method:
importance_types.remove(imp)
# container to hold importances with each method
importance = []
for idx, imp_type in enumerate(importance_types):
score = pd.Series(booster.get_score(importance_type=imp_type))
score = pd.DataFrame(score, columns=[imp_type])
if rescale:
# so that the sum of all feature importance is 1.0 and the
# scale is relative
score = score / score.sum()
importance.append(score)
importance = pd.concat(importance, axis=1)
if backend=="plotly":
width = figsize[0] if figsize else 1200
height = figsize[1] if figsize else 1200
return xgb_fimp_with_plotly(importance,
importance_types,
fig_width=width,
fig_height=height,
path=self.model.path)
plt.close('all')
fig, axis = plt.subplots(importance.shape[1],
sharex="all",
figsize=figsize)
for ax, imp in zip(axis.flat, importance.columns):
ax = ep.bar_chart(
importance[imp],
labels=importance.index,
orient="vertical",
show=False,
rotation=90,
label=imp,
ax=ax,
**kwargs)
ax.legend()
fname = os.path.join(self.model.path, "xgb_f_imp_comp")
if self.save:
plt.savefig(fname, bbox_inches="tight")
if self.show:
plt.show()
return fig
def tft_attention_components(
self,
x = None,
data=None,
data_type:str = "test",
):
"""
Gets attention components of tft layer from ai4water's Model.
Parameters
----------
x :
the input data to the model
data :
raw data from which ``x``/inputs are extracted.
data_type :
the data to use to calculate attention components
Returns
-------
dict
dictionary containing attention components of tft as numpy arrays.
Following four attention components are present in the dictionary
- decoder_self_attn: (attention_heads, ?, total_time_steps, 22)
- static_variable_selection_weights:
- encoder_variable_selection_weights: (?, encoder_steps, input_features)
- decoder_variable_selection_weights: (?, decoder_steps, input_features)
str
a string indicating which data was used
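Example
-------
a minimal sketch; assumes ``model`` is a trained ai4water Model containing a
TemporalFusionTransformer layer and ``data`` is the corresponding raw data
>>> components, _ = Interpret(model).tft_attention_components(data=data)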
"""
maybe_create_path(self.model.path)
if x is None:
x, _, = getattr(self.model, f'{data_type}_data')(data=data)
if len(x) == 0 and data_type == "test":
warnings.warn("No test data found. using validation data instead",
UserWarning)
x, _, = getattr(self.model, 'validation_data')(data=data)
assert len(x) > 0
attentions = self.model.TemporalFusionTransformer_attentions
if self.model.api == 'subclassing':
inputs = self.model.inputs
else:
inputs = self.model._model.inputs
attention_components = {}
for k, v in attentions.items():
if v is not None:
temp_model = tf.keras.Model(inputs=inputs,
outputs=v)
attention_components[k] = temp_model.predict(x=x, verbose=0, steps=1)
return attention_components, data
def get_enc_var_selection_weights(self, data, data_type:str='test'):
"""Returns encoder variable selection weights of TFT model"""
ac, _ = self.tft_attention_components(data=data, data_type=data_type)
return ac['encoder_variable_selection_weights']
def interpret_example_tft(
self,
example_index:int,
x = None,
data=None,
data_type='test'
):
"""interprets a single example using TFT model.
Parameters
---------
example_index : int
index of example to be explained
x :
input data, if not given, ``data`` must be given
data :
the data whose example to interpret.
data_type : str
either ``training``, ``test``, ``validation`` or ``all``.
It is only useful when ``data`` argument is used.
"""
assert data_type in ("training", "test", "validation", "all")
if x is None:
data_name = data_type
else:
data_name = "data"
enc_var_selection_weights = self.get_enc_var_selection_weights(
data=data, data_type=data_type)
plt.close('all')
im = ep.imshow(
enc_var_selection_weights[example_index],
aspect="auto",
ax_kws=dict(title=example_index,
ylabel="lookback steps"),
show=False
)
plt.xticks(np.arange(self.model.num_ins), self.model.input_features,
rotation=90)
plt.colorbar(im, orientation='vertical', pad=0.05)
fname = os.path.join(maybe_create_path(self.model.path),
f'{data_name}_enc_var_selec_{example_index}.png')
if self.save:
plt.savefig(fname, bbox_inches='tight', dpi=300)
if self.show:
plt.show()
return
def interpret_tft(
self,
x=None,
y=None,
data=None,
data_type="test"
):
"""global interpretation of TFT model.
Arguments:
x :
input data. If not given, ``data`` argument must be given.
y :
labels/target/true data corresponding to ``x``. It is only
used for plotting.
data :
the data to use to interpret model. It is only required
when ``x`` is not given.
data_type :
either ``training``, ``test``, ``validation`` or ``all``.
It is only useful when ``data`` argument is used.
"""
if x is None:
predictions = getattr(self.model, f"predict_on_{data_type}_data")(
data=data,
process_results=False, verbose=0)
x, y, = getattr(self.model, f'{data_type}_data')(data=data)
else:
predictions = self.model.predict(x=x, verbose=0)
ac, data = self.tft_attention_components(data=data)
encoder_variable_selection_weights = ac['encoder_variable_selection_weights']
plot_activations_along_inputs(
activations=encoder_variable_selection_weights,
data=x[:, -1],
observations=y,
predictions=predictions,
in_cols=self.model.input_features,
out_cols=self.model.output_features,
lookback=self.model.lookback,
name=f'tft_encoder_weights_{data}',
path=maybe_create_path(self.model.path)
)
return
def interpret_attention_lstm(
self,
x=None,
data = None,
data_type:str = "test"
):
"""
Arguments:
x :
input data. If not given, ``data`` argument must be given.
data :
the data to use to interpret model. It is only required
when ``x`` is not given.
data_type :
either ``training``, ``test``, ``validation`` or ``all``.
It is only useful when ``data`` argument is used.
"""
raise NotImplementedError
def interpret_tab_transformer(
self,
x=None,
data = None,
data_type:str = "test"
):
"""
Arguments:
x :
input data. If not given, ``data`` argument must be given.
data :
the data to use to interpret model. It is only required
when ``x`` is not given.
data_type :
either ``training``, ``test``, ``validation`` or ``all``.
It is only useful when ``data`` argument is used.
"""
raise NotImplementedError
def interpret_ft_transformer(
self,
x=None,
data = None,
data_type:str = "test"
):
"""
Arguments:
x :
input data. If not given, ``data`` argument must be given.
data :
the data to use to interpret model. It is only required
when ``x`` is not given.
data_type :
either ``training``, ``test``, ``validation`` or ``all``.
It is only useful when ``data`` argument is used.
"""
raise NotImplementedError
def xgb_fimp_with_plotly(
importance:pd.DataFrame,
importance_types,
fig_width,
fig_height,
path,
):
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# initiate figure with subplots
fig = make_subplots(
rows=len(importance_types) + 1, cols=1,
vertical_spacing=0.02
# shared_xaxes=True
)
for idx, col in enumerate(importance.columns):
fig.add_trace(go.Bar(
x=importance.index.tolist(),
y=importance[col],
name=col
), row=idx + 1, col=1)
fig.update_xaxes(showticklabels=False) # hide all the xticks
fig.update_xaxes(showticklabels=True,
row=len(importance_types),
col=1,
tickangle=-45,
title="Input Features"
)
# Here we modify the tickangle of the xaxis, resulting in rotated labels.
fig.update_layout(
height=fig_height,
width=fig_width,
legend_title="Calculation Method",
title_text="XGBoost Feature Importance",
title_x=0.42,
font=dict(
family="Times New Roman",
size=26,
)
)
fname = os.path.join(path, "xgb_f_imp_comp.html")
fig.write_html(fname)
return fig
def maybe_create_path(path):
path = os.path.join(path, "interpret")
if not os.path.exists(path):
os.makedirs(path)
return path
def is_fitted(estimator):
if hasattr(estimator, 'is_fitted'): # for CATBoost
return estimator.is_fitted
attrs = [v for v in vars(estimator)
if v.endswith("_") and not v.startswith("__")]
if not attrs:
return False
return True | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/interpret/_main.py | _main.py |
import warnings
from typing import Union
from ai4water.backend import easy_mpl as ep
from ai4water.backend import tf, keras, np, plt, os, random, lightgbm, xgboost, sklearn
if tf is not None:
import ai4water.keract_mod as keract
else:
keract = None
from ai4water.utils.plotting_tools import Plots
from ai4water.utils.utils import maybe_three_outputs, get_nrows_ncols
from ..utils import choose_examples
try:
from ai4water.utils.utils_from_see_rnn import rnn_histogram
except ModuleNotFoundError:
rnn_histogram = None
try:
from dtreeviz import trees
except ModuleNotFoundError:
trees = None
RNN_INFO = {"LSTM": {'rnn_type': 'LSTM',
'gate_names': ['INPUT', 'FORGET', 'CELL', 'OUTPUT'],
'n_gates': 4,
'is_bidir': False,
'rnn_dim': 64,
'uses_bias': True,
'direction_names': [[]]
}
}
CMAPS = [
'jet_r', 'ocean_r', 'viridis_r', 'BrBG',
'GnBu',
#'crest_r',
'Blues_r', 'bwr_r',
#'flare',
'YlGnBu'
]
TREE_BASED_MODELS = [
"DecisionTreeRegressor",
"ExtraTreeRegressor",
"XGBRFRegressor",
"XGBRegressor",
"CatBoostRegressor",
"LGBMRegressor",
"DecisionTreeClassifier",
"ExtraTreeClassifier",
"XGBClassifier",
"XGBRFClassifier",
"CatBoostClassifier",
"LGBMClassifier"
]
class Visualize(Plots):
"""Hepler class to peek inside the machine learning mdoel.
If the machine learning model consists of layers of neural networks,
then this class can be used to plot following 4 items
- outputs of individual layers
- gradients of outputs of individual layers
- weights and biases of individual layers
- gradients of weights of individual layers
If the machine learning model consists of tree, then this
class can be used to plot the learned tree of the model.
methods
-------
- get_activations
- activations
- get_activation_gradients
- activation_gradients
- get_weights
- weights
- get_weight_gradients
- weight_gradients
- decision_tree
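    Example
    -------
    a minimal usage sketch; ``model`` is assumed to be an already trained
    deep-learning ai4water model and ``data`` the raw data used to train it
    ```python
    visualizer = Visualize(model, show=False)
    # plot outputs of a layer; "LSTM_0" is only a placeholder for an actual layer name
    visualizer.activations(layer_names="LSTM_0", data=data)
    # plot trainable weights of all layers
    visualizer.weights()
    # plot gradients of trainable weights
    visualizer.weight_gradients(data=data)
    ```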
"""
def __init__(
self,
model,
save=True,
show=True,
verbosity=None,
):
"""
Arguments:
model :
the learned machine learning model.
save : bool
show : bool
verbosity : bool/int
"""
plt.rcParams.update(plt.rcParamsDefault)
self.model = model
self.verbosity = model.verbosity if verbosity is None else verbosity
self.save=save
self.show=show
self.vis_path = os.path.join(model.path, "visualize")
if not os.path.exists(self.vis_path):
os.makedirs(self.vis_path)
Plots.__init__(self,
path=self.vis_path,
config=model.config)
def __call__(
self,
layer_name,
data=None,
data_type='training',
x=None,
y=None,
examples_to_use=None
):
if self.model.category == "DL":
self.activations(layer_name,
data=data,
data_type=data_type,
x=x,
examples_to_use=examples_to_use)
self.activation_gradients(layer_name,
x=x, y=y,
data=data,
data_type=data_type,
examples_to_use=examples_to_use)
self.weights(layer_name)
self.weight_gradients(layer_name, data=data, data_type=data_type, x=x, y=y)
else:
self.decision_tree()
self.decision_tree_leaves(data=data, data_type=data_type)
return
def get_activations(
self,
layer_names: Union[list, str] = None,
x=None,
data = None,
data_type: str = 'training',
batch_size:int=None,
) -> dict:
"""
gets the activations/outputs of any layer of the Keras Model.
Arguments:
            layer_names : name or list of names of layers whose activations are
to be returned.
x :
The input that will be fed to NN to extract activations. If provided,
it will override `data`.
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract input
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
batch_size : int
Returns:
            a dictionary whose keys are names of layers and values are the
            activations (outputs) of those layers as numpy arrays
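        Example:
            a sketch, assuming ``vis`` is a :class:`Visualize` instance built around
            a trained deep-learning model and ``data`` is its raw, unprepared data
            ```python
            # "LSTM_0" is only a placeholder for an actual layer name of the model
            acts = vis.get_activations(layer_names="LSTM_0", data=data)
            ```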
"""
if x is None:
x, y = self._get_xy_from_data(data=data, data_type=data_type)
if self.model.api == "subclassing":
dl_model = self.model
else:
dl_model = self.model._model
if isinstance(x, list):
num_examples = len(x[0])
elif isinstance(x, np.ndarray):
num_examples = len(x)
else:
raise ValueError
if batch_size:
# feed each batch and get activations per batch
assert isinstance(layer_names, str)
_activations = []
for batch in range(num_examples // batch_size):
batch_x = _get_batch_input(x, batch, batch_size)
batch_activations = keract.get_activations(
dl_model,
batch_x,
layer_names=layer_names,
auto_compile=True)
assert len(batch_activations) == 1 # todo
_activations.append(list(batch_activations.values())[0])
activations = {layer_names: np.concatenate(_activations)}
else:
activations = keract.get_activations(dl_model, x,
layer_names=layer_names,
auto_compile=True)
return activations
def activations(
self,
layer_names=None,
x=None,
data = None,
data_type:str = "training",
examples_to_use: Union[int, list, np.ndarray, range] = None,
**kwargs
):
"""
Plots outputs of any layer of neural network.
Arguments:
x :
if given, will override, 'data'.
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract input
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
layer_names :
name of layer whose output is to be plotted. If None,
it will plot outputs of all layers
examples_to_use :
If integer, it will be the number of examples to use.
If array like, it will be the indices of examples to use.
"""
activations = self.get_activations(x=x, data=data, data_type=data_type)
if layer_names is not None:
if isinstance(layer_names, str):
layer_names = [layer_names]
else:
layer_names = layer_names
else:
layer_names = list(activations.keys())
assert isinstance(layer_names, list)
if self.verbosity > 0:
print("Plotting activations of layers")
for lyr_name, activation in activations.items():
if lyr_name in layer_names:
# activation may be tuple e.g if input layer receives more than
# 1 input
if isinstance(activation, np.ndarray):
if activation.ndim == 2 and examples_to_use is None:
examples_to_use = range(len(activation)-1)
self._plot_activations(
activation,
lyr_name,
examples_to_use,
**kwargs
)
elif isinstance(activation, tuple):
for act in activation:
self._plot_activations(act, lyr_name, **kwargs)
return
def _plot_activations(self,
activation,
lyr_name,
examples_to_use=None,
**kwargs):
if examples_to_use is None:
indices = range(len(activation))
else:
activation, indices = choose_examples(activation, examples_to_use)
if kwargs is None:
kwargs = {}
if "LSTM" in lyr_name.upper() and np.ndim(activation) in (2, 3):
if activation.ndim == 3:
self.features_2d(activation,
show=self.show,
name=lyr_name + "_outputs",
sup_title="Activations",
n_rows=6,
sup_xlabel="LSTM units",
sup_ylabel="Lookback steps",
title=indices,
)
else:
self._imshow(activation, f"{lyr_name} Activations",
fname=lyr_name,
show=self.show,
ylabel="Examples", xlabel="LSTM units",
cmap=random.choice(CMAPS))
elif np.ndim(activation) == 2 and activation.shape[1] > 1:
if "lstm" in lyr_name.lower():
kwargs['xlabel'] = "LSTM units"
self._imshow(activation, lyr_name + " Activations",
show=self.show,
fname=lyr_name, **kwargs)
elif np.ndim(activation) == 3:
if "input" in lyr_name.lower():
kwargs['xticklabels'] = self.model.input_features
self._imshow_3d(activation, lyr_name, save=self.show, **kwargs,
where='')
elif np.ndim(activation) == 2: # this is now 1d
# shape= (?, 1)
self.plot1d(activation,
label=lyr_name + ' Outputs',
show=self.show,
fname=lyr_name + '_outputs',
**kwargs
)
else:
print("""
ignoring activations for {} because it has shape {}, {}""".format(
lyr_name, activation.shape,
np.ndim(activation)))
return
def get_weights(self)->dict:
""" returns all trainable weights as arrays in a dictionary"""
weights = {}
for weight in self.model.trainable_weights:
if tf.executing_eagerly():
weights[weight.name] = weight.numpy()
else:
weights[weight.name] = keras.backend.eval(weight)
return weights
def weights(
self,
layer_names: Union[str, list] = None,
**kwargs
):
"""Plots the weights of a specific layer or all layers.
Arguments:
layer_names : The layer whose weights are to be viewed.
"""
weights = self.get_weights()
if self.verbosity > 0:
print("Plotting trainable weights of layers of the model.")
if layer_names is None:
layer_names = list(weights.keys())
elif isinstance(layer_names, str):
layer_names = [layer_names]
else:
layer_names = layer_names
for lyr in layer_names:
for _name, weight in weights.items():
if lyr in _name:
title = _name
fname = _name + '_weights'
rnn_args = None
if "LSTM" in title.upper():
rnn_args = {'n_gates': 4,
'gate_names_str': "(input, forget, cell, output)"}
if np.ndim(weight) == 2 and weight.shape[1] > 1:
self._imshow(weight, title, show=self.show, fname=fname,
rnn_args=rnn_args)
elif len(weight) > 1 and np.ndim(weight) < 3:
self.plot1d(weight,
title,
self.show,
fname,
rnn_args=rnn_args,
**kwargs)
elif "conv" in _name.lower() and np.ndim(weight) == 3:
_name = _name.replace("/", "_")
_name = _name.replace(":", "_")
self.features_2d(data=weight,
save=self.show,
name=_name,
slices=64,
slice_dim=2,
tight=True,
borderwidth=1)
else:
print("""ignoring weight for {} because it has shape {}
""".format(_name, weight.shape))
return
def get_activation_gradients(
self,
layer_names: Union[str, list] = None,
x=None,
y=None,
data = None,
data_type:str = "training",
) -> dict:
"""
Finds gradients of outputs of a layer.
either x,y or data is required
Arguments:
layer_names :
The layer for which, the gradients of its outputs are
to be calculated.
x :
input data. Will overwrite `data`
y :
corresponding label of x. Will overwrite `data`.
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract x and y
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
"""
if isinstance(layer_names, str):
layer_names = [layer_names]
if x is None:
x, y = self._get_xy_from_data(data=data, data_type=data_type)
from ai4water.functional import Model as FModel
if isinstance(self.model, FModel):
model = self.model._model
else:
model = self.model
return keract.get_gradients_of_activations(
model,
x,
y,
layer_names=layer_names)
def activation_gradients(
self,
layer_names: Union[str, list],
data = None,
data_type='training',
x=None,
y=None,
examples_to_use=None,
plot_type="2D",
):
"""Plots the gradients o activations/outputs of layers
Arguments:
layer_names : the layer name for which the gradients of its outputs
are to be plotted.
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract x and y
which will be given to NN to get gradients of ativations
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
x : alternative to data
y : alternative to data
examples_to_use : the examples from the data to use. If None, then all
examples will be used, which is equal to the length of data.
plot_type :
"""
if plot_type == "2D":
return self.activation_gradients_2D(
layer_names,
data=data,
data_type=data_type,
x=x, y=y,
examples_to_use=examples_to_use
)
return self.activation_gradients_1D(
layer_names,
data=data,
data_type=data_type,
x=x,
y=y,
examples_to_use=examples_to_use
)
def activation_gradients_2D(
self,
layer_names=None,
data = None,
data_type:str='training',
x=None,
y=None,
examples_to_use=None
):
"""Plots activations of intermediate layers except input and output
Arguments:
layer_names :
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract input
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
x :
y :
examples_to_use : if integer, it will be the number of examples to use.
If array like, it will be index of examples to use
"""
gradients = self.get_activation_gradients(
layer_names=layer_names,
data=data,
data_type=data_type,
x=x,
y=y)
return self._plot_act_grads(gradients, examples_to_use)
def activation_gradients_1D(
self,
layer_names,
data = None,
data_type:str='training',
x=None,
y=None,
examples_to_use=None
):
"""Plots gradients of layer outputs as 1D
Arguments:
layer_names :
examples_to_use :
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract input
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
x :
y :
"""
gradients = self.get_activation_gradients(
layer_names=layer_names,
data=data,
data_type=data_type,
x=x,
y=y)
for lyr_name, gradient in gradients.items():
fname = lyr_name + "_output_grads"
title = lyr_name + " Output Gradients"
if np.ndim(gradient) == 3:
for idx, example in enumerate(gradient):
_title = f"{title}_{idx}"
_fname = f"{fname}_{idx}"
if "LSTM" in lyr_name:
example = example.T
self.features_1d(example, name=_fname, title=_title,
xlabel="Lookback steps", ylabel="Gradients")
return
def _plot_act_grads(self, gradients, examples_to_use=24):
if self.verbosity > 0:
print("Plotting gradients of activations of layersr")
for lyr_name, gradient in gradients.items():
if examples_to_use is None:
indices = range(len(gradient))
else:
gradient, indices = choose_examples(gradient, examples_to_use)
fname = lyr_name + "_output_grads"
title = lyr_name + " Output Gradients"
if "LSTM" in lyr_name.upper() and np.ndim(gradient) in (2, 3):
if gradient.ndim == 2:
self._imshow(gradient, fname=fname, label=title, show=self.show,
xlabel="LSTM units")
else:
self.features_2d(gradient,
name=fname,
title=indices,
show=self.show,
n_rows=6,
sup_title=title,
sup_xlabel="LSTM units",
sup_ylabel="Lookback steps")
elif np.ndim(gradient) == 2:
if gradient.shape[1] > 1:
# (?, ?)
self._imshow(gradient, title, self.show, fname)
elif gradient.shape[1] == 1:
# (? , 1)
self.plot1d(np.squeeze(gradient), title, self.show, fname)
elif np.ndim(gradient) == 3 and gradient.shape[1] == 1:
if gradient.shape[2] == 1:
# (?, 1, 1)
self.plot1d(np.squeeze(gradient), title, self.show, fname)
else:
# (?, 1, ?)
self._imshow(np.squeeze(gradient), title, self.show, fname)
elif np.ndim(gradient) == 3:
if gradient.shape[2] == 1:
# (?, ?, 1)
self._imshow(np.squeeze(gradient), title, self.show, fname)
elif gradient.shape[2] > 1:
# (?, ?, ?)
self._imshow_3d(gradient, lyr_name, self.show)
else:
print("""
ignoring activation gradients for {} because it has shape {} {}
""".format(lyr_name, gradient.shape, np.ndim(gradient)))
def _get_xy_from_data(self, data, data_type:str = "training"):
assert data is not None, f"If x is not given, data must be given"
assert isinstance(data_type, str)
data = getattr(self.model, f'{data_type}_data')(data=data)
x, y = maybe_three_outputs(data)
return x, y
def get_weight_gradients(
self,
x=None,
y=None,
data = None,
data_type: str = 'training'
) -> dict:
"""Returns the gradients of weights.
Arguments:
x :
inputs, if not given, then ``data`` must be given
y :
target
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract x and y
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
Returns:
dictionary whose keys are names of layers and values are gradients of
weights as numpy arrays.
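        Example:
            a sketch, assuming ``vis`` is a :class:`Visualize` instance and ``data``
            is the raw, unprepared data
            ```python
            grads = vis.get_weight_gradients(data=data)  # {weight name: gradient array}
            ```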
"""
if x is None:
x, y = self._get_xy_from_data(data=data, data_type=data_type)
from ai4water.functional import Model as FModel
if isinstance(self.model, FModel):
model = self.model._model
else:
model = self.model
return keract.get_gradients_of_trainable_weights(model, x, y)
def weight_gradients(
self,
layer_names: Union[str, list] = None,
data = None,
data_type='training',
x=None,
y=None,
):
"""Plots gradient of all trainable weights
Arguments:
layer_names : the layer whose weeights are to be considered.
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract x and y
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
x : alternative to data
y : alternative to data
"""
gradients = self.get_weight_gradients(
data=data, data_type=data_type, x=x, y=y)
if layer_names is None:
layers_to_plot = list(gradients.keys())
elif isinstance(layer_names, str):
layers_to_plot = [layer_names]
else:
layers_to_plot = layer_names
if self.verbosity > 0:
print("Plotting gradients of trainable weights")
for lyr_to_plot in layers_to_plot:
for lyr_name, gradient in gradients.items():
# because lyr_name is most likely larger
if lyr_to_plot in lyr_name:
title = lyr_name + "Weight Gradients"
fname = lyr_name + '_weight_grads'
rnn_args = None
if "LSTM" in title.upper():
rnn_args = {
'n_gates': 4,
'gate_names_str': "(input, forget, cell, output)"}
if np.ndim(gradient) == 3:
self.rnn_histogram(gradient, name=fname, title=title)
if np.ndim(gradient) == 2 and gradient.shape[1] > 1:
self._imshow(gradient, title, show=self.show, fname=fname,
rnn_args=rnn_args)
elif len(gradient) and np.ndim(gradient) < 3:
self.plot1d(gradient, title, show=self.show, fname=fname,
rnn_args=rnn_args)
else:
print(f"""ignoring weight gradients for {lyr_name}
because it has shape {gradient.shape} {np.ndim(gradient)}
""")
return
def find_num_lstms(self, layer_names=None) -> list:
"""Finds names of lstm layers in model"""
if layer_names is not None:
if isinstance(layer_names, str):
layer_names = [layer_names]
assert isinstance(layer_names, list)
lstm_names = []
for lyr, config in self.config['model']['layers'].items():
if "LSTM" in lyr.upper():
config = config.get('config', config)
prosp_name = config.get('name', lyr)
if layer_names is not None:
if prosp_name in layer_names:
lstm_names.append(prosp_name)
else:
lstm_names.append(prosp_name)
return lstm_names
def get_rnn_weights(self, weights: dict, layer_names=None) -> dict:
"""Finds RNN related weights.
        It combines the kernel, recurrent kernel and bias of each layer into a list.
"""
lstm_weights = {}
if self.config['model'] is not None and 'layers' in self.config['model']:
if "LSTM" in self.config['model']['layers']:
lstms = self.find_num_lstms(layer_names)
for lstm in lstms:
lstm_w = []
for w in ["kernel", "recurrent_kernel", "bias"]:
w_name = lstm + "/lstm_cell/" + w
w_name1 = f"{lstm}/{w}"
for k, v in weights.items():
if any(_w in k for _w in [w_name, w_name1]):
lstm_w.append(v)
lstm_weights[lstm] = lstm_w
return lstm_weights
def rnn_weights_histograms(self, layer_name):
weights = self.get_weights()
rnn_weights = self.get_rnn_weights(weights, layer_name)
for k, w in rnn_weights.items():
self.rnn_histogram(w, name=k + "_weight_histogram")
return
def rnn_weight_grads_as_hist(
self,
layer_name=None,
data=None,
data_type='training',
x=None,
y=None,
):
gradients = self.get_weight_gradients(
data=data, data_type=data_type, x=x, y=y)
rnn_weights = self.get_rnn_weights(gradients)
for k, w in rnn_weights.items():
self.rnn_histogram(w, name=k + "_weight_grads_histogram")
return
def rnn_histogram(self, data, save=True, name='', **kwargs):
if save:
save = os.path.join(self.vis_path, name + "0D.png")
else:
save = None
if rnn_histogram is None:
warnings.warn("install see-rnn to plot rnn_histogram plot", UserWarning)
else:
rnn_histogram(data, RNN_INFO["LSTM"], bins=400, savepath=save,
show=self.show, **kwargs)
return
def decision_tree(self, show=False, **kwargs):
"""Plots the decision tree"""
plot_tree = sklearn.tree.plot_tree
fname = os.path.join(self.path, "decision_tree")
if self.model.category == "ML":
model_name = list(self.model.config['model'].keys())[0]
if model_name in TREE_BASED_MODELS:
_fig, axis = plt.subplots(figsize=kwargs.get('figsize', (10, 10)))
if model_name.startswith("XGB"):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.plot_tree
xgboost.plot_tree(self.model._model, ax=axis, **kwargs)
elif model_name.startswith("Cat"):
gv_object = self.model._model.plot_tree(0, **kwargs)
if self.show:
gv_object.view()
gv_object.save(filename="decision_tree", directory=self.path)
elif model_name.startswith("LGBM"):
lightgbm.plot_tree(self.model._model, ax=axis, **kwargs)
else: # sklearn types
plot_tree(self.model._model,
feature_names=self.model.input_features,
ax=axis, **kwargs)
plt.savefig(fname, dpi=500)
if self.show:
plt.show()
else:
print(f"decision tree can not be plotted for {model_name}")
else:
print(f"""
decision tree can not be plotted for {self.model.category} models""")
return
def decision_tree_leaves(
self,
data = None,
data_type:str='training'
):
"""Plots dtreeviz related plots if dtreeviz is installed
data :
raw unprepared data from which will be forwarded to
:py:meth:`ai4water.preprocessing.DataSet` to extract input
data_type : str
either ``training``, ``validation`` or ``test``. Only relevant
if ``data`` argument is active
"""
model = list(self.config['model'].keys())[0]
if model in ["DecisionTreeRegressor", "DecisionTreeClassifier"]:
if trees is None:
print("dtreeviz related plots can not be plotted")
else:
x, y = getattr(self.model, f'{data_type}_data')(data=data)
if np.ndim(y) > 2:
y = np.squeeze(y, axis=2)
trees.viz_leaf_samples(self.model._model, x, self.in_cols)
self.save_or_show(self.save, fname="viz_leaf_samples", where="plots")
trees.ctreeviz_leaf_samples(self.model._model, x, y,
self.in_cols)
self.save_or_show(self.save, fname="ctreeviz_leaf_samples",
where="plots")
return
def features_2d(self, data, name, save=True, slices=24, slice_dim=0, **kwargs):
"""Calls the features_2d from see-rnn"""
st=0
if 'title' in kwargs:
title = kwargs.pop('title')
else:
title = None
for en in np.arange(slices, data.shape[slice_dim] + slices, slices):
if save:
fname = name + f"_{st}_{en}"
save = os.path.join(self.path, fname+".png")
else:
save = None
if isinstance(title, np.ndarray):
_title = title[st:en]
else:
_title = title
if slice_dim == 0:
features_2D(data[st:en, :], savepath=save, title=_title, **kwargs)
else:
# assuming it will always be the last dim if not first
features_2D(data[..., st:en], savepath=save, title=_title, **kwargs)
st = en
return
def features_1d(self, data, save=True, name='', **kwargs):
if save:
save = os.path.join(self.path, name + ".png")
else:
save=None
if features_1D is None:
warnings.warn("install see-rnn to plot features-1D plot", UserWarning)
else:
features_1D(data, savepath=save, **kwargs)
return
def features_2D(data,
n_rows=None,
cmap=None,
sup_xlabel=None,
sup_ylabel=None,
sup_title=None,
title=None,
show=False,
savepath=None):
"""
title: title for individual axis
sup_title: title for whole plot
"""
n_subplots = len(data) if data.ndim == 3 else 1
nrows, ncols = get_nrows_ncols(n_rows, n_subplots)
cmap = cmap or random.choice(CMAPS)
fig, axis = plt.subplots(nrows=nrows, ncols=ncols,
dpi=100, figsize=(10, 10),
sharex='all', sharey='all')
num_subplots = len(axis.ravel()) if isinstance(axis, np.ndarray) else 1
if isinstance(title, str):
title = [title for _ in range(num_subplots)]
elif isinstance(title, list):
assert len(title) == num_subplots
elif isinstance(title, np.ndarray):
assert len(title) == num_subplots
elif title:
title = np.arange(num_subplots)
if isinstance(axis, plt.Axes):
axis = np.array([axis])
vmin = data.min()
vmax = data.max()
for idx, ax in enumerate(axis.flat):
im = ep.imshow(data[idx],
ax=ax,
cmap=cmap, vmin=vmin, vmax=vmax,
ax_kws=dict(title=title[idx]),
show=False)
if sup_xlabel:
fig.text(0.5, 0.04, sup_xlabel, ha='center', fontsize=20)
if sup_ylabel:
fig.text(0.04, 0.5, sup_ylabel, va='center', rotation='vertical',
fontsize=20)
fig.subplots_adjust(hspace=0.2)
cbar_ax = fig.add_axes([0.92, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
if sup_title:
plt.suptitle(sup_title)
# fig.tight_layout()
if savepath:
plt.savefig(savepath, bbox_inches="tight")
if show:
plt.show()
return
def features_1D(data, xlabel=None, ylabel=None, savepath=None, show=None,
title=None):
assert data.ndim == 2
_, axis = plt.subplots()
for i in data:
axis.plot(i)
if xlabel:
axis.set_xlabel(xlabel)
if ylabel:
axis.set_ylabel(ylabel)
if title:
axis.set_title(title)
if savepath:
plt.savefig(savepath, bbox_inches="tight")
if show:
plt.show()
return
def _get_batch_input(inp, batch_idx, batch_size):
if isinstance(inp, list):
batch_inp = []
for x in inp:
st = batch_idx*batch_size
batch_inp.append(x[st: st+batch_size])
else:
st = batch_idx * batch_size
batch_inp = inp[st: st+batch_size]
return batch_inp | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/postprocessing/visualize/_main.py | _main.py |
import re
from weakref import WeakKeyDictionary
from ai4water.backend import np
metric_dict = {
'Exa': 1e18,
'Peta': 1e15,
'Tera': 1e12,
'Giga': 1e9,
'Mega': 1e6,
'Kilo': 1e3,
'Hecto': 1e2,
'Deca': 1e1,
None: 1,
'Deci': 1e-1,
'Centi': 1e-2,
'Milli': 1e-3,
'Micro': 1e-6,
'Nano': 1e-9,
'Pico': 1e-12,
'Femto': 1e-15,
'Atto': 1e-18
}
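# these metric prefixes scale a base unit, e.g. in "KiloPascal" the prefix
# "Kilo" contributes a factor of 1e3 to the base unit "Pascal"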
time_dict = {
'Year': 31540000,
'Month': 2628000,
    'Week': 604800,
'Day': 86400,
'Hour': 3600,
'Minute': 60,
'Second': 1,
}
imperial_dist_dict = {
'Mile': 63360,
'Furlong': 7920,
'Rod': 198,
'Yard': 36,
'Foot': 12,
'Inch': 1
}
unit_plurals = {
"Inches": "Inch",
"Miles": "Mile",
"Meters": "Meter",
"Feet": "Foot"
}
def check_plurals(unit):
if unit in unit_plurals:
unit = unit_plurals[unit]
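    # e.g. "Feet" -> "Foot"; units already in singular form are returned unchanged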
return unit
def split_speed_units(unit):
dist = unit.split("Per")[0]
zeit = unit.split("Per")[1]
    # the distance and time parts may contain an underscore ('_') at the start or end,
    # e.g. when the unit is "Meter_Per_Second"; we need to remove all such underscores
dist = dist.replace("_", "")
zeit = zeit.replace("_", "")
if dist in unit_plurals:
dist = unit_plurals[dist]
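    # e.g. "MeterPerSecond" -> ("Meter", "Second") and "MilesPerHour" -> ("Mile", "Hour")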
return dist, zeit
class WrongUnitError(Exception):
def __init__(self, u_type, qty, unit, allowed, prefix=None):
self.u_type = u_type
self.qty = qty
self.unit = unit
self.allowed = allowed
self.pre = prefix
def __str__(self):
if self.pre is None:
return '''
*
* {} unit `{}` for {} is wrong. Use either of {}
*
'''.format(self.u_type, self.unit, self.qty, self.allowed)
# prefix {milli} provided for {input} unit of {temperature} is wrong. {input} unit is {millipascal}, allowed are {}}
else:
return """
*
* prefix `{}` provided for {} unit of {} is wrong.
* {} unit is: {}. Allowed units are
* {}.
*
""".format(self.pre, self.u_type, self.qty, self.u_type, self.unit, self.allowed)
def check_converter(converter):
super_keys = converter.keys()
for k, v in converter.items():
sub_keys = v.keys()
if all(x in super_keys for x in sub_keys):
a = 1
else:
a = 0
if all(x in sub_keys for x in super_keys):
b = 1
else:
b = 0
assert a == b
TempUnitConverter = {
"FAHRENHEIT": {
"Fahrenheit": lambda fahrenheit: fahrenheit * 1.0, # fahrenheit to Centigrade
"Kelvin": lambda fahrenheit: (fahrenheit + 459.67) * 5/9, # fahrenheit to kelvin
"Centigrade": lambda fahrenheit: (fahrenheit - 32.0) / 1.8 # fahrenheit to Centigrade
},
"KELVIN": {
"Fahrenheit": lambda kelvin: kelvin * 9/5 - 459.67, # kelvin to fahrenheit
"Kelvin": lambda k: k*1.0, # Kelvin to Kelvin
"Centigrade": lambda kelvin: kelvin - 273.15 # kelvin to Centigrade}
},
"CENTIGRADE": {
"Fahrenheit": lambda centigrade: centigrade * 1.8 + 32, # Centigrade to fahrenheit
"Kelvin": lambda centigrade: centigrade + 273.15, # Centigrade to kelvin
"Centigrade": lambda centigrade: centigrade * 1.0}
}
PressureConverter = {
"Pascal": { # Pascal to
"Pascal": lambda pascal: pascal,
"Bar": lambda pascal: pascal * 1e-5,
"Atm": lambda pascal: pascal / 101325,
"Torr": lambda pascal: pascal * 0.00750062,
"Psi": lambda pascal: pascal / 6894.76,
"Ta": lambda pascal: pascal * 1.01971621298E-5
},
"Bar": { # Bar to
"Pascal": lambda bar: bar / 0.00001,
"Bar": lambda bar: bar,
"Atm": lambda bar: bar / 1.01325,
"Torr": lambda bar: bar * 750.062,
"Psi": lambda bar: bar * 14.503,
"Ta": lambda bar: bar * 1.01972
},
"Atm": { # Atm to
"Pascal": lambda atm: atm * 101325,
"Bar": lambda atm: atm * 1.01325,
"Atm": lambda atm: atm,
"Torr": lambda atm: atm * 760,
"Psi": lambda atm: atm * 14.6959,
"At": lambda atm: atm * 1.03322755477
},
"Torr": { # Torr to
"Pascal": lambda torr: torr / 0.00750062,
"Bar": lambda torr: torr / 750.062,
"Atm": lambda torr: torr / 760,
"Torr": lambda tor: tor,
"Psi": lambda torr: torr / 51.7149,
"Ta": lambda torr: torr * 0.00135950982242
},
"Psi": { # Psi to
"Pascal": lambda psi: psi * 6894.76,
"Bar": lambda psi: psi / 14.5038,
"Atm": lambda psi: psi / 14.6959,
"Torr": lambda psi: psi * 51.7149,
"Psi": lambda psi: psi,
"Ta": lambda psi: psi * 0.0703069578296,
},
"Ta": { # Ta to
"Pascal": lambda at: at / 1.01971621298E-5,
"Bar": lambda at: at / 1.0197,
"Atm": lambda at: at / 1.03322755477,
"Torr": lambda at: at / 0.00135950982242,
"Psi": lambda at: at / 0.0703069578296,
"Ta": lambda ta: ta
}
}
DistanceConverter = {
"Meter": {
"Meter": lambda meter: meter,
"Inch": lambda meter: meter * 39.3701
},
"Inch": {
"Meter": lambda inch: inch * 0.0254,
"Inch": lambda inch: inch
}
}
class Pressure(object):
"""
```python
p = Pressure(20, "Pascal")
print(p.MilliBar) #>> 0.2
print(p.Bar) #>> 0.0002
p = Pressure(np.array([10, 20]), "KiloPascal")
print(p.MilliBar) # >> [100, 200]
p = Pressure(np.array([1000, 2000]), "MilliBar")
print(p.KiloPascal) #>> [100, 200]
print(p.Atm) # >> [0.98692, 1.9738]
```
"""
def __init__(self, val, input_unit):
self.val = val
check_converter(PressureConverter)
self.input_unit = input_unit
@property
def allowed(self):
return list(PressureConverter.keys())
@property
def input_unit(self):
return self._input_unit
@input_unit.setter
def input_unit(self, in_unit):
self._input_unit = in_unit
def __getattr__(self, out_unit):
# pycharm calls this method for its own working, executing default behaviour at such calls
if out_unit.startswith('_'):
return self.__getattribute__(out_unit)
else:
act_iu, iu_pf = self._preprocess(self.input_unit, "Input")
act_ou, ou_pf = self._preprocess(out_unit, "Output")
if act_iu not in self.allowed:
raise WrongUnitError("Input", self.__class__.__name__, act_iu, self.allowed)
if act_ou not in self.allowed:
raise WrongUnitError("output", self.__class__.__name__, act_ou, self.allowed)
ou_f = PressureConverter[act_iu][act_ou](self.val)
val = np.round(np.array((iu_pf * ou_f) / ou_pf), 5)
return val
def _preprocess(self, given_unit, io_type="Input"):
split_u = split_units(given_unit)
if len(split_u) < 1: # Given unit contained no capital letter so list is empty
raise WrongUnitError(io_type, self.__class__.__name__, given_unit, self.allowed)
pf, ou_pf = 1.0, 1.0
act_u = split_u[0]
if len(split_u) > 1:
pre_u = split_u[0] # prefix of input unit
act_u = split_u[1] # actual input unit
if pre_u in metric_dict:
pf = metric_dict[pre_u] # input unit prefix factor
else:
raise WrongUnitError(io_type, self.__class__.__name__, act_u, self.allowed, pre_u)
return act_u, pf
class NotString:
def __init__(self):
self.data = WeakKeyDictionary()
def __get__(self, instance, owner):
return self.data[instance]
def __set__(self, instance, value):
if isinstance(value, list) or isinstance(value, np.ndarray):
value = np.array(value).astype(np.float32)
self.data[instance] = value
def __set_name__(self, owner, name):
self.name = name
class Temp(object):
"""
    The idea is to write the conversion functions in a dictionary and then dynamically create the attribute if it
    is present in the converter as a key, otherwise raise WrongUnitError.
converts temperature among units [kelvin, centigrade, fahrenheit]
:param `temp` a numpy array
:param `input_unit` str, units of temp, should be "Kelvin", "Centigrade" or "Fahrenheit"
Example:
```python
temp = np.arange(10)
T = Temp(temp, 'Centigrade')
T.Kelvin
>> array([273 274 275 276 277 278 279 280 281 282])
T.Fahrenheit
>> array([32. , 33.8, 35.6, 37.4, 39.2, 41. , 42.8, 44.6, 46.4, 48.2])
T.Centigrade
>>array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
```
"""
val = NotString()
def __init__(self, val, input_unit):
self.val = val
check_converter(TempUnitConverter)
self.input_unit = input_unit
def __getattr__(self, out_unit):
# pycharm calls this method for its own working, executing default behaviour at such calls
if out_unit.startswith('_'):
return self.__getattribute__(out_unit)
else:
if out_unit not in TempUnitConverter[self.input_unit]:
raise WrongUnitError("output", self.__class__.__name__, out_unit, self.allowed)
val = TempUnitConverter[self.input_unit][str(out_unit)](self.val)
return val
@property
def allowed(self):
return list(TempUnitConverter.keys())
@property
def input_unit(self):
return self._input_unit
@input_unit.setter
def input_unit(self, in_unit):
if in_unit.upper() == 'CELCIUS':
in_unit = 'CENTIGRADE'
if in_unit.upper() not in self.allowed:
raise WrongUnitError("Input", self.__class__.__name__, in_unit, self.allowed)
self._input_unit = in_unit.upper()
class Distance(object):
"""
unit converter for distance or length between different imperial and/or metric units.
```python
t = Distance(np.array([2.0]), "Mile")
np.testing.assert_array_almost_equal(t.Inch, [126720], 5)
np.testing.assert_array_almost_equal(t.Meter, [3218.688], 5)
np.testing.assert_array_almost_equal(t.KiloMeter, [3.218688], 5)
np.testing.assert_array_almost_equal(t.CentiMeter, [321869], 0)
np.testing.assert_array_almost_equal(t.Foot, [10560.], 5)
t = Distance(np.array([5000]), "MilliMeter")
np.testing.assert_array_almost_equal(t.Inch, [196.85039], 5)
np.testing.assert_array_almost_equal(t.Meter, [5.0], 5)
np.testing.assert_array_almost_equal(t.KiloMeter, [0.005], 5)
np.testing.assert_array_almost_equal(t.CentiMeter, [500.0], 5)
np.testing.assert_array_almost_equal(t.Foot, [16.404199], 5)
```
"""
def __init__(self, val, input_unit):
self.val = val
self.input_unit = input_unit
@property
def allowed(self):
return list(imperial_dist_dict.keys()) + ['Meter']
@property
def input_unit(self):
return self._input_unit
@input_unit.setter
def input_unit(self, in_unit):
self._input_unit = in_unit
def __getattr__(self, out_unit):
# pycharm calls this method for its own working, executing default behaviour at such calls
if out_unit.startswith('_'):
return self.__getattribute__(out_unit)
else:
act_iu, iu_pf = self._preprocess(self.input_unit, "Input")
act_ou, ou_pf = self._preprocess(out_unit, "Output")
act_iu = check_plurals(act_iu)
act_ou = check_plurals(act_ou)
if act_iu not in self.allowed:
raise WrongUnitError("Input", self.__class__.__name__, act_iu, self.allowed)
if act_ou not in self.allowed:
raise WrongUnitError("output", self.__class__.__name__, act_ou, self.allowed)
out_in_meter = self._to_meters(ou_pf, act_ou) # get number of meters in output unit
input_in_meter = self.val * iu_pf # for default case when input unit has Meter in it
# if input unit is in imperial system, first convert it into inches and then into meters
if act_iu in imperial_dist_dict:
input_in_inches = imperial_dist_dict[act_iu] * self.val * iu_pf
input_in_meter = DistanceConverter['Inch']['Meter'](input_in_inches)
val = input_in_meter / out_in_meter
return val
def _to_meters(self, prefix, actual_unit):
meters = prefix
if actual_unit != "Meter":
inches = imperial_dist_dict[actual_unit] * prefix
meters = DistanceConverter['Inch']['Meter'](inches)
return meters
def _preprocess(self, given_unit, io_type="Input"):
# TODO unit must not be split based on capital letters, it is confusing and prone to erros
split_u = split_units(given_unit)
if len(split_u) < 1: # Given unit contained no capital letter so list is empty
raise WrongUnitError(io_type, self.__class__.__name__, given_unit, self.allowed)
pf, ou_pf = 1.0, 1.0
act_u = split_u[0]
if len(split_u) > 1:
pre_u = split_u[0] # prefix of input unit
act_u = split_u[1] # actual input unit
if pre_u in metric_dict:
pf = metric_dict[pre_u] # input unit prefix factor
else:
raise WrongUnitError(io_type, self.__class__.__name__, act_u, self.allowed, pre_u)
return act_u, pf
class Time(object):
"""
```python
t = Time(np.array([100, 200]), "Hour")
np.testing.assert_array_almost_equal(t.Day, [4.16666667, 8.33333333], 5)
t = Time(np.array([48, 24]), "Day")
np.testing.assert_array_almost_equal(t.Minute, [69120., 34560.], 5)
```
"""
def __init__(self, val, input_unit):
self.val = val
self.input_unit = input_unit
@property
def allowed(self):
return list(time_dict.keys())
@property
def input_unit(self):
return self._input_unit
@input_unit.setter
def input_unit(self, in_unit):
self._input_unit = in_unit
def __getattr__(self, out_unit):
# pycharm calls this method for its own working, executing default behaviour at such calls
if out_unit.startswith('_'):
return self.__getattribute__(out_unit)
else:
act_iu, iu_pf = self._preprocess(self.input_unit, "Input")
act_ou, ou_pf = self._preprocess(out_unit, "Output")
if act_iu not in self.allowed:
raise WrongUnitError("Input", self.__class__.__name__, act_iu, self.allowed)
if act_ou not in self.allowed:
raise WrongUnitError("output", self.__class__.__name__, act_ou, self.allowed)
in_sec = time_dict[act_iu] * self.val * iu_pf
val = in_sec / (time_dict[act_ou]*ou_pf)
return val
def _preprocess(self, given_unit, io_type="Input"):
split_u = split_units(given_unit)
if len(split_u) < 1: # Given unit contained no capital letter so list is empty
raise WrongUnitError(io_type, self.__class__.__name__, given_unit, self.allowed)
pf, ou_pf = 1.0, 1.0
act_u = split_u[0]
if len(split_u) > 1:
pre_u = split_u[0] # prefix of input unit
act_u = split_u[1] # actual input unit
if pre_u in metric_dict:
pf = metric_dict[pre_u] # input unit prefix factor
else:
raise WrongUnitError(io_type, self.__class__.__name__, act_u, self.allowed, pre_u)
return act_u, pf
class Speed(object):
"""
    converts between different speed units using the Distance and Time classes, which convert
    distance and time units separately. This class uses both of them separately and
    then combines the results.
```python
s = Speed(np.array([10]), "KiloMeterPerHour")
np.testing.assert_array_almost_equal(s.MeterPerSecond, [2.77777778], 5)
np.testing.assert_array_almost_equal(s.MilePerHour, [6.21371192], 5)
np.testing.assert_array_almost_equal(s.FootPerSecond, [9.11344415], 5)
s = Speed(np.array([14]), "FootPerSecond")
np.testing.assert_array_almost_equal(s.MeterPerSecond, [4.2672], 5)
np.testing.assert_array_almost_equal(s.MilePerHour, [9.54545], 5)
np.testing.assert_array_almost_equal(s.KiloMeterPerHour, [15.3619], 4)
s = Speed(np.arange(10), 'KiloMetersPerHour')
o = np.array([ 0. , 10.936, 21.872, 32.808, 43.744, 54.680, 65.616 , 76.552, 87.489, 98.425])
np.testing.assert_array_almost_equal(s.InchesPerSecond, o, 2)
```
"""
def __init__(self, val, input_unit):
self.val = val
self.input_unit = input_unit
@property
def input_unit(self):
return self._input_unit
@input_unit.setter
def input_unit(self, in_unit):
if '_' in in_unit:
raise ValueError("remove underscore from units {}".format(in_unit))
self._input_unit = in_unit
def __getattr__(self, out_unit):
# pycharm calls this method for its own working, executing default behaviour at such calls
if out_unit.startswith('_'):
return self.__getattribute__(out_unit)
else:
in_dist, in_zeit = split_speed_units(self.input_unit)
out_dist, out_zeit = split_speed_units(out_unit)
d = Distance(np.array([1]), in_dist)
dist_f = getattr(d, out_dist) # distance factor
t = Time(np.array([1]), in_zeit)
time_f = getattr(t, out_zeit) # time factor
out_val = self.val * (dist_f/time_f)
return out_val
def split_units(unit):
"""splits string `unit` based on capital letters"""
return re.findall('[A-Z][^A-Z]*', unit) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/et/converter.py | converter.py |
from ai4water.backend import np, random
# Latent heat of vaporisation [MJ.Kg-1]
LAMBDA = 2.45
#: Solar constant [ MJ m-2 min-1]
SOLAR_CONSTANT = 0.0820
SB_CONS = 3.405e-12 # per minute
ALLOWED_COLUMNS = ['temp', 'tmin', 'tmax', 'rel_hum', 'sol_rad', 'rn', 'wind_speed', 'es', 'sunshine_hrs',
'rh_min', 'rh_max', 'rh_mean', 'et_rad', 'cs_rad', 'half_hr', 'tdew', 'daylight_hrs']
def random_array(_length, lower=0.0, upper=0.5):
"""This creates a random array of length `length` and the floats vary between `lower` and `upper`."""
rand_array = np.zeros(_length)
for i in range(_length):
rand_array[i] = random.uniform(lower, upper)
return rand_array
def Colors(_name):
colors = {'sol_rad': np.array([0.84067393, 0.66066663, 0.22888342]),
'rel_hum': np.array([0.50832319, 0.53790088, 0.17337983]),
'rh_mean': np.array([0.34068018, 0.65708722, 0.19501699]),
'tmax': np.array([0.94943837, 0.34234137, 0.03188675]),
'tmin': np.array([0.91051202, 0.65414968, 0.48220781]),
'ra': np.array([0.64027147, 0.75848599, 0.59123481]),
'rn': np.array([0.68802968, 0.38316639, 0.13177745]),
'ea': np.array([0.19854081, 0.44556471, 0.35620562]),
'wind_speed': np.array([0.40293934, 0.51160837, 0.1293387]),
'jday': np.array([0.88396109, 0.14081036, 0.44402598]),
'half_hr': np.array([0.53974518, 0.48519598, 0.11808065]),
'sha': np.array([0.67873225, 0.67178641, 0.01953063])}
if 'Daily' in _name:
return np.array([0.63797563, 0.05503074, 0.07078517])
elif 'Minute' in _name:
return np.array([0.27822191, 0.7608274, 0.89536561])
elif 'Hourly' in _name:
return np.array([0.70670405, 0.71039014, 0.54375619])
elif 'Monthly' in _name:
return np.array([0.39865179, 0.61455622, 0.57515074])
elif 'Yearly' in _name:
return np.array([0.81158386, 0.182704, 0.93272506])
elif 'Sub_hourly' in _name:
return np.array([0.1844271, 0.70936978, 0.53026012])
elif _name in colors:
return colors[_name]
else:
c = random_array(3, 0.01, 0.99)
print('for ', _name, c)
return c
default_constants = {
'lat': {'desc': 'latitude in decimal degrees', 'def_val': None, 'min': -90, 'max': 90},
'long': {'desc': 'longitude in decimal degrees', 'def_val': None},
'a_s': {'desc': 'fraction of extraterrestrial radiation reaching earth on sunless days', 'def_val': 0.23},
    'b_s': {'desc': """difference between the fraction of extraterrestrial radiation reaching the earth on full-sun days and that
                    on sunless days""", 'def_val': 0.5},
'albedo': {'desc': """a numeric value between 0 and 1 (dimensionless), albedo of evaporative surface representing
the portion of the incident radiation that is reflected back at the surface. Default is 0.23 for
surface covered with short reference crop, which is for the calculation of Matt-Shuttleworth
reference crop evaporation.""", 'def_val': 0.23, 'min': 0, 'max': 1},
'abtew_k': {'desc': 'a coefficient used in Abtew', 'def_val': 0.52},
'CH': {'desc': 'crop height', 'def_val': 0.12},
'Ca': {'desc': 'Specific heat of air', 'def_val': 0.001013},
'surf_res': {'desc': "surface resistance (s/m) depends on the type of reference crop. Default is 70 for short"
" reference crop", 'def_val': 70, 'min': 0, 'max': 9999},
'alphaPT': {'desc': 'Priestley-Taylor coefficient', 'def_val': 1.26},
'ritchie_a': {'desc': "Coefficient for Richie method", 'def_val': 0.002322},
'ritchie_b': {'desc': "Coefficient for Richie method", 'def_val': 0.001548},
'ritchie_c': {'desc': "Coefficient for Ritchie Method", 'def_val': 0.11223}
} | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/et/global_variables.py | global_variables.py |
import re
import math
from easy_mpl.utils import process_axis
from .converter import Temp, Speed, Pressure
from .global_variables import ALLOWED_COLUMNS, SOLAR_CONSTANT, LAMBDA
from .global_variables import default_constants, SB_CONS
from ai4water.backend import np, pd, plt
class AttributeChecker:
def __init__(self, input_df):
self.input = self.check_in_df(input_df)
self.output = {}
self.allowed_columns = ALLOWED_COLUMNS
self.no_of_hours = None
def check_in_df(self, data_frame) -> pd.DataFrame:
if not isinstance(data_frame, pd.DataFrame):
raise TypeError("input must be a pandas dataframe")
for col in data_frame.columns:
if col not in ALLOWED_COLUMNS:
raise ValueError("""col {} given in input dataframe is not allowed. Allowed columns names are {}"""
.format(col, ALLOWED_COLUMNS))
if not isinstance(data_frame.index, pd.DatetimeIndex):
index = pd.to_datetime(data_frame.index)
if not isinstance(index, pd.DatetimeIndex):
raise TypeError("index of input dataframe must be convertible to pd.DatetimeIndex")
if data_frame.shape[0] > 1:
data_frame.index.freq = pd.infer_freq(data_frame.index)
else:
setattr(self, 'single_vale', True)
setattr(self, 'in_freq', data_frame.index.freqstr)
return data_frame
class PlotData(AttributeChecker):
"""
Methods:
plot_inputs
plot_outputs
"""
def __init__(self, input_df, units):
super(PlotData, self).__init__(input_df)
self.units = units
def plot_inputs(self, _name=False):
no_of_plots = len(self.input.columns)
plt.close('all')
fig, axis = plt.subplots(no_of_plots, sharex='all')
fig.set_figheight(no_of_plots+2)
fig.set_figwidth(10.48)
idx = 0
for ax, col in zip(axis, self.input.columns):
show_xaxis = False
if idx > no_of_plots-2:
show_xaxis = True
if col in self.units:
yl = self.units[col]
else:
yl = ' '
data = self.input[col]
process_axis(ax, data, label=col, show_xaxis=show_xaxis, ylabel=yl,
legend_kws={'markerscale':8},
max_xticks=4)
idx += 1
plt.subplots_adjust(wspace=0.001, hspace=0.001)
if _name:
plt.savefig(_name, dpi=300, bbox_inches='tight')
plt.show()
def plot_outputs(self, name='', _name=False):
def marker_scale(_col):
if 'Monthly' in _col:
return 4
elif 'Yearly' in _col:
return 10
else:
return 0.5
to_plot = []
for key in self.output.keys():
if name in key:
to_plot.append(key)
no_of_plots = len(to_plot)
plt.close('all')
fig, axis = plt.subplots(no_of_plots, sharex='all')
if no_of_plots==1:
axis = [axis]
fig.set_figheight(no_of_plots+4)
fig.set_figwidth(10.48)
idx = 0
for ax, col in zip(axis, self.output.keys()):
show_xaxis = False
if idx > no_of_plots-2:
show_xaxis = True
data = self.output[col]
process_axis(ax, data, ms=marker_scale(col), label=col, show_xaxis=show_xaxis, ylabel='mm',
legend_kws={'markerscale': 8}, max_xticks=4)
idx += 1
plt.subplots_adjust(wspace=0.001, hspace=0.001)
if _name:
plt.savefig(_name, dpi=300, bbox_inches='tight')
plt.show()
class PreProcessing(PlotData):
"""
Attributes
freq_str: str
daily_index: pd.DatetimeIndex
freq_in_mins: int
"""
def __init__(self, input_df, units, constants, calculate_at='same', verbosity=1):
super(PreProcessing, self).__init__(input_df, units)
self.units = units
self.default_cons = default_constants
self.cons = constants
self.freq_in_mins = calculate_at
self.sb_cons = self.freq_in_mins
self.lat_rad = self.cons
self._check_compatability()
self.verbosity = verbosity
@property
def seconds(self):
"""finds number of seconds between two steps of input data"""
if len(self.input) > 1:
return (self.input.index[1]-self.input.index[0])/np.timedelta64(1, 's')
@property
def sb_cons(self):
return self._sb_cons
@sb_cons.setter
def sb_cons(self, freq_in_mins):
self._sb_cons = freq_in_mins * SB_CONS
@property
def lat_rad(self):
return self._lat_rad
@lat_rad.setter
def lat_rad(self, constants):
if 'lat_rad' in constants:
self._lat_rad = constants['lat_rad']
elif 'lat_dec_deg' in constants:
self._lat_rad = constants['lat_dec_deg'] * 0.0174533 # # degree to radians
else:
            raise ValueError("Provide latitude information either as `lat_rad` or as `lat_dec_deg` in constants")
@property
def freq_in_mins(self):
return self._freq_in_mins
@freq_in_mins.setter
def freq_in_mins(self, calculate_at):
if calculate_at is not None and calculate_at != 'same':
if isinstance(calculate_at, str):
in_minutes = freq_in_mins_from_string(calculate_at)
else:
raise TypeError("invalid type of frequency demanded", calculate_at)
else:
in_minutes = freq_in_mins_from_string(self.input.index.freqstr)
self._freq_in_mins = in_minutes
@property
def freq_str(self) -> str:
minutes = self.freq_in_mins
freq_str = min_to_str(minutes)
return freq_str
def daily_index(self) -> pd.DatetimeIndex:
start_year = justify_len(str(self.input.index[0].year))
end_year = justify_len(str(self.input.index[-1].year))
start_month = justify_len(str(self.input.index[0].month))
        end_month = justify_len(str(self.input.index[-1].month))
start_day = justify_len(str(self.input.index[0].day))
        end_day = justify_len(str(self.input.index[-1].day))
st = start_year + start_month + start_day
en = end_year + end_month + end_day
return pd.date_range(st, en, freq='D')
def _check_compatability(self):
self._preprocess_temp()
self._preprocess_rh()
self._check_wind_units()
        self._check_pressure_units()
self._check_rad_units()
# getting julian day
self.input['jday'] = self.input.index.dayofyear
if self.freq_in_mins == 60:
a = self.input.index.hour
ma = np.convolve(a, np.ones((2,)) / 2, mode='same')
ma[0] = ma[1] - (ma[2] - ma[1])
self.input['half_hr'] = ma
freq = self.input.index.freqstr
if len(freq) > 1:
setattr(self, 'no_of_hours', int(freq[0]))
else:
setattr(self, 'no_of_hours', 1)
self.input['t1'] = np.zeros(len(self.input)) + self.no_of_hours
elif self.freq_in_mins < 60:
a = self.input.index.hour
b = (self.input.index.minute + self.freq_in_mins / 2.0) / 60.0
self.input['half_hr'] = a + b
self.input['t1'] = np.zeros(len(self.input)) + self.freq_in_mins / 60.0
for val in ['sol_rad', 'rn']:
if val in self.input:
if self.freq_in_mins <= 60:
self.input['is_day'] = np.where(self.input[val].values > 0.1, 1, 0)
return
def _preprocess_rh(self):
# make sure that we mean relative humidity calculated if possible
if 'rel_hum' in self.input.columns:
rel_hum = self.input['rel_hum']
rel_hum = np.where(rel_hum < 0.0, 0.0, rel_hum)
rel_hum = np.where(rel_hum >= 100.0, 100.0, rel_hum)
self.input['rh_mean'] = rel_hum
self.input['rel_hum'] = rel_hum
else:
if 'rh_min' in self.input.columns:
self.input['rh_mean'] = np.mean(np.array([self.input['rh_min'].values, self.input['rh_max'].values]),
axis=0)
return
def _preprocess_temp(self):
""" converts temperature related input to units of Centigrade if required. """
# converting temperature units to celsius
for val in ['tmin', 'tmax', 'temp', 'tdew']:
if val in self.input:
t = Temp(self.input[val].values, self.units[val])
temp = t.Centigrade
self.input[val] = np.where(temp < -30, -30, temp)
# if 'temp' is given, it is assumed to be mean otherwise calculate mean and put it as `temp` in input dataframe.
if 'temp' not in self.input.columns:
if 'tmin' in self.input.columns and 'tmax' in self.input.columns:
self.input['temp'] = np.mean(np.array([self.input['tmin'].values, self.input['tmax'].values]), axis=0)
return
def _check_wind_units(self):
# check units of wind speed and convert if needed
if 'wind_speed' in self.input:
wind = self.input['wind_speed'].values
wind = np.where(wind < 0.0, 0.0, wind)
w = Speed(wind, self.units['wind_speed'])
self.input['wind_speed'] = w.MeterPerSecond
return
    def _check_pressure_units(self):
""" converts pressure related input to units of KiloPascal if required. """
for pres in ['ea', 'es', 'vp_def']:
if pres in self.input:
p = Pressure(self.input[pres].values, self.units[pres])
self.input[pres] = p.KiloPascal
def _check_rad_units(self):
"""
        Currently it does not convert radiation units; it only makes sure that they are > 0.0.
"""
for val in ['rn', 'sol_rad']:
if val in self.input:
rad = self.input[val].values
rad = np.where(rad < 0.0, 0.0, rad)
self.input[val] = rad
class TransFormData(PreProcessing):
"""
transforms input or output data to different frequencies.
"""
def __init__(self, input_df, units, constants, calculate_at='same', verbosity=1):
self.verbosity = verbosity
input_df = self.freq_check(input_df, calculate_at)
input_df = self.transform_data(input_df, calculate_at)
super(TransFormData, self).__init__(input_df, units, constants, calculate_at, verbosity)
def freq_check(self, input_df, freq: str):
"""
        Makes sure that the index of the input dataframe has a frequency. If the frequency is not there, it means the
        data contains missing time-steps. In that case this method fills the missing values, and the argument `freq` must not be `same`.
"""
if input_df.shape[0] > 1:
input_df.index.freq = pd.infer_freq(input_df.index)
if input_df.index.freq is None:
if freq == 'same' or freq is None:
raise ValueError("input data does not have uniform time-step. Provide a value for argument"
" `calculate_at` ")
else:
new_freq = freq_in_mins_from_string(freq)
try:
input_df.index.freq = freq
except ValueError:
input_df = self.fill_missing_data(input_df, str(new_freq) + 'min')
return input_df
def fill_missing_data(self, df: pd.DataFrame, new_freq: str):
if self.verbosity > 0:
print("input contains missing values or time-steps")
df = force_freq(df.copy(), new_freq, 'input', 'nearest')
assert df.index.freqstr is not None
return df
def transform_data(self, input_df, calculate_at):
if calculate_at == 'same' or calculate_at is None:
df = input_df
else:
new_freq_mins = freq_in_mins_from_string(calculate_at)
old_freq_mins = freq_in_mins_from_string(input_df.index.freqstr)
if new_freq_mins == old_freq_mins:
df = input_df
elif new_freq_mins > old_freq_mins:
# we want to calculate at higher/larger time-step
print('downsampling input data from {} to {}'.format(old_freq_mins, new_freq_mins))
df = self.downsample_input(input_df, new_freq_mins)
else:
print('upsampling input data from {} to {}'.format(old_freq_mins, new_freq_mins))
# we want to calculate at smaller time-step
df = self.upsample_input(input_df, new_freq_mins)
return df
def upsample_input(self, df, out_freq):
# from larger timestep to smaller timestep, such as from daily to hourly
for col in df.columns:
df[col] = self.upsample_df(pd.DataFrame(df[col]), col, out_freq)
return df
def downsample_input(self, df, out_freq):
# from low timestep to high timestep i.e from 1 hour to 24 hour
# from hourly to daily
for col in df.columns:
df[col] = self.downsample_df(pd.DataFrame(df[col]), col, out_freq)
return df
def transform_etp(self, name):
freq_to_trans = self.get_freq()
down_sample = freq_to_trans['up_sample']
up_sample = freq_to_trans['down_sample']
for freq in up_sample:
in_col_name = 'et_' + name + '_' + self.freq_str
freq_str = min_to_str(freq)
out_col_name = 'et_' + name + '_' + freq_str
self.output[out_col_name] = self.upsample_df(pd.DataFrame(self.output[in_col_name]), 'et', freq)
for freq in down_sample:
in_col_name = 'et_' + name + '_' + self.freq_str
freq_str = min_to_str(freq)
out_col_name = 'et_' + name + '_' + freq_str
self.output[out_col_name] = self.downsample_df(pd.DataFrame(self.output[in_col_name]), 'et', freq)
def downsample_df(self, data_frame: pd.DataFrame, data_name: str, out_freq: int):
# from low timestep to high timestep i.e from 1 hour to 24 hour
# from hourly to daily
col_name = data_frame.columns[0]
data_frame = data_frame.copy()
old_freq = data_frame.index.freq
if self.verbosity > 1:
print('downsampling {} data from {} to {}'.format(col_name, old_freq, min_to_str(out_freq)))
out_freq = str(out_freq) + 'min'
# e.g. from hourly to daily
if data_name in ['temp', 'rel_hum', 'rh_min', 'rh_max', 'uz', 'u2', 'wind_speed_kph', 'q_lps']:
return data_frame.resample(out_freq).mean()
elif data_name in ['rain_mm', 'ss_gpl', 'sol_rad', 'etp', 'et']:
return data_frame.resample(out_freq).sum()
def upsample_df(self, data_frame, data_name, out_freq_int):
# from larger timestep to smaller timestep, such as from daily to hourly
out_freq = str(out_freq_int) + 'min'
col_name = data_frame.columns[0]
old_freq = data_frame.index.freqstr
nan_idx = data_frame.isna() # preserving indices with nan values
nan_idx_r = nan_idx.resample(out_freq).ffill()
nan_idx_r = nan_idx_r.fillna(False) # the first value was being filled with NaN, idk y?
data_frame = data_frame.copy()
if self.verbosity > 1:
print('upsampling {} data from {} to {}'.format(data_name, old_freq, min_to_str(out_freq_int)))
# e.g from monthly to daily or from hourly to sub_hourly
if data_name in ['temp', 'rel_hum', 'rh_min', 'rh_max', 'uz', 'u2', 'q_lps']:
data_frame = data_frame.resample(out_freq).interpolate(method='linear')
# filling those interpolated values with NaNs which were NaN before interpolation
data_frame[nan_idx_r] = np.nan
elif data_name in ['rain_mm', 'ss_gpl', 'sol_rad', 'pet', 'pet_hr', 'et', 'etp']:
# distribute rainfall equally to smaller time steps. like hourly 17.4 will be 1.74 at 6 min resolution
idx = data_frame.index[-1] + get_offset(data_frame.index.freqstr)
data_frame = data_frame.append(data_frame.iloc[[-1]].rename({data_frame.index[-1]: idx}))
data_frame = add_freq(data_frame)
df1 = data_frame.resample(out_freq).ffill().iloc[:-1]
df1[col_name] /= df1.resample(data_frame.index.freqstr)[col_name].transform('size')
data_frame = df1.copy()
# filling those interpolated values with NaNs which were NaN before interpolation
data_frame[nan_idx_r] = np.nan
return data_frame
def get_freq(self) -> dict:
""" decides which frequencies to """
all_freqs = {'Sub_hourly': {'down_sample': [1], 'up_sample': [60, 1440, 43200, 525600]},
'Hourly': {'down_sample': [1], 'up_sample': [1440, 43200, 525600]},
'Sub_daily': {'down_sample': [1, 60], 'up_sample': [1440, 43200, 525600]},
'Daily': {'down_sample': [1, 60], 'up_sample': [43200, 525600]},
'Sub_monthly': {'down_sample': [1, 60, 1440], 'up_sample': [43200, 525600]},
'Monthly': {'down_sample': [1, 60, 1440], 'up_sample': [525600]},
'Annualy': {'down_sample': [1, 60, 1440, 43200], 'up_sample': []}
}
return all_freqs[self.freq_str]
class Utils(TransFormData):
"""
    Contains helper methods for the calculation of ETP with various methods.
Methods:
net_rad
atm_pressure
_wind_2m
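    Example:
        a minimal sketch; ``df`` is assumed to be a pandas DataFrame with a
        DatetimeIndex at a regular time-step and columns from ``ALLOWED_COLUMNS``
        such as ``temp``, ``sol_rad`` and ``wind_speed``
        ```python
        util = Utils(df,
                     units={'temp': 'Centigrade', 'wind_speed': 'MeterPerSecond'},
                     constants={'lat_dec_deg': 35.0, 'albedo': 0.23})
        rs = util.rs()  # incoming solar radiation
        ```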
"""
def __init__(self, input_df, units, constants, calculate_at=None, verbosity: bool=1):
"""
Arguments:
calculate_at :a valid pandas dataframe frequency
verbosity :
"""
super(Utils, self).__init__(input_df, units, constants, calculate_at=calculate_at, verbosity=verbosity)
@property
def seasonal_correction(self):
"""Seasonal correction for solar time (Eqs. 57 & 58)
uses
----------
doy : scalar or array_like of shape(M, )
Day of year.
Returns
------
ndarray
Seasonal correction [hour]
"""
doy = self.input['jday']
b = 2 * math.pi * (doy - 81.) / 364.
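        # e.g. for doy = 81 (around the spring equinox) b = 0 and the correction is -0.1255 hours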
return 0.1645 * np.sin(2 * b) - 0.1255 * np.cos(b) - 0.0250 * np.sin(b)
def net_rad(self, ea, rs=None):
"""
Calculate daily net radiation at the crop surface, assuming a grass reference crop.
Net radiation is the difference between the incoming net shortwave (or solar) radiation and the outgoing net
longwave radiation. Output can be converted to equivalent evaporation [mm day-1] using ``energy2evap()``.
Based on equation 40 in Allen et al (1998).
:uses rns: Net incoming shortwave radiation [MJ m-2 day-1]. Can be
estimated using ``net_in_sol_rad()``.
rnl: Net outgoing longwave radiation [MJ m-2 day-1]. Can be
estimated using ``net_out_lw_rad()``.
:return: net radiation [MJ m-2 timestep-1].
:rtype: float
"""
if 'rn' not in self.input:
if rs is None:
rs = self.rs()
if 'rns' not in self.input:
rns = self.net_in_sol_rad(rs)
else:
rns = self.input['rns']
rnl = self.net_out_lw_rad(rs=rs, ea=ea)
rn = np.subtract(rns, rnl)
self.input['rn'] = rn # for future use
else:
rn = self.input['rn']
return rn
def rs(self):
"""
calculate solar radiation either from sunshine hours (first preference) or from temperature (second
preference, since it is less accurate). If measured solar radiation is not provided, sunshine hours are
used when available; otherwise radiation is estimated from tmin and tmax, which are easy to obtain
and differ from year to year."""
if 'sol_rad' not in self.input.columns:
if 'sunshine_hrs' in self.input.columns:
rs = self.sol_rad_from_sun_hours()
if self.verbosity > 0:
print("Sunshine hour data is used for calculating incoming solar radiation")
elif 'tmin' in self.input.columns and 'tmax' in self.input.columns:
rs = self._sol_rad_from_t()
if self.verbosity > 0:
print("solar radiation is calculated from temperature")
else:
raise ValueError("""Unable to calculate solar radiation. Provide either of following inputs:
sol_rad, sunshine_hrs or tmin and tmax""")
else:
rs = self.input['sol_rad']
self.input['sol_rad'] = rs
return rs
def net_in_sol_rad(self, rs):
"""
Calculate net incoming solar (or shortwave) radiation (*Rns*) from gross incoming solar radiation, assuming a
grass reference crop.
Net incoming solar radiation is the net shortwave radiation resulting from the balance between incoming and
reflected solar radiation. The output can be converted to equivalent evaporation [mm day-1] using
``energy2evap()``.
Based on FAO equation 38 in Allen et al (1998).
Rns = (1-a)Rs
uses Gross incoming solar radiation [MJ m-2 day-1]. If necessary this can be estimated using functions whose
name begins with 'solar_rad_from'.
:param rs: solar radiation
albedo: Albedo of the crop as the proportion of gross incoming solar
radiation that is reflected by the surface. Default value is 0.23,
which is the value used by the FAO for a short grass reference crop.
Albedo can be as high as 0.95 for freshly fallen snow and as low as
0.05 for wet bare soil. A green vegetation cover has an albedo of
about 0.20-0.25 (Allen et al, 1998).
:return: Net incoming solar (or shortwave) radiation [MJ m-2 day-1].
:rtype: float
"""
return np.multiply((1 - self.cons['albedo']), rs)
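# worked example (a sketch with assumed numbers, not taken from the reference): with the default
# albedo of 0.23 and rs = 20 MJ m-2 day-1, rns = (1 - 0.23) * 20 = 15.4 MJ m-2 day-1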
def net_out_lw_rad(self, rs, ea):
"""
Estimate net outgoing longwave radiation.
This is the net longwave energy (net energy flux) leaving the earth's surface. It is proportional to the
absolute temperature of the surface raised to the fourth power according to the Stefan-Boltzmann law. However,
water vapour, clouds, carbon dioxide and dust are absorbers and emitters of longwave radiation. This function
corrects the Stefan- Boltzmann law for humidity (using actual vapor pressure) and cloudiness (using solar
radiation and clear sky radiation). The concentrations of all other absorbers are assumed to be constant.
The output can be converted to equivalent evaporation [mm timestep-1] using ``energy2evap()``.
Based on FAO equation 39 in Allen et al (1998).
uses: Absolute daily minimum temperature [degrees Kelvin]
Absolute daily maximum temperature [degrees Kelvin]
Solar radiation [MJ m-2 day-1]. If necessary this can be estimated using ``rs()``.
Clear sky radiation [MJ m-2 day-1]. Can be estimated using ``cs_rad()``.
Actual vapour pressure [kPa]. Can be estimated using functions with names beginning with 'avp_from'.
:param ea: actual vapour pressure, can be calculated using method avp_from
:param rs: solar radiation
:return: Net outgoing longwave radiation [MJ m-2 timestep-1]
:rtype: float
"""
if 'tmin' in self.input.columns and 'tmax' in self.input.columns:
added = np.add(np.power(self.input['tmax'].values+273.16, 4), np.power(self.input['tmin'].values+273.16, 4))
divided = np.divide(added, 2.0)
else:
divided = np.power(self.input['temp'].values+273.16, 4.0)
tmp1 = np.multiply(self.sb_cons, divided)
tmp2 = np.subtract(0.34, np.multiply(0.14, np.sqrt(ea)))
tmp3 = np.subtract(np.multiply(1.35, np.divide(rs, self._cs_rad())), 0.35)
return np.multiply(tmp1, np.multiply(tmp2, tmp3)) # eq 39
def sol_rad_from_sun_hours(self):
"""
Calculate incoming solar (or shortwave) radiation, *Rs* (radiation hitting a horizontal plane after
scattering by the atmosphere) from relative sunshine duration.
If measured radiation data are not available this method is preferable to calculating solar radiation from
temperature. If a monthly mean is required then divide the monthly number of sunshine hours by number of
days in the month and ensure that *et_rad* and *daylight_hours* was calculated using the day of the year
that corresponds to the middle of the month.
Based on equations 34 and 35 in Allen et al (1998).
uses: Number of daylight hours [hours]. Can be calculated using ``daylight_hours()``.
Sunshine duration [hours]. Can be calculated using ``sunshine_hours()``.
Extraterrestrial radiation [MJ m-2 day-1]. Can be estimated using ``et_rad()``.
:return: Incoming solar (or shortwave) radiation [MJ m-2 day-1]
:rtype: float
"""
# 0.5 and 0.25 are default values of regression constants (Angstrom values)
# recommended by FAO when calibrated values are unavailable.
ss_hrs = self.input['sunshine_hrs'] # sunshine_hours
dl_hrs = self.daylight_fao56() # daylight_hours
return np.multiply(np.add(self.cons['a_s'], np.multiply(np.divide(ss_hrs, dl_hrs), self.cons['b_s'])),
self._et_rad())
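# worked example (sketch, assuming the FAO default Angstrom values a_s=0.25 and b_s=0.5): with
# ss_hrs/dl_hrs = 0.55 and extraterrestrial radiation of 25 MJ m-2 day-1,
# rs = (0.25 + 0.5 * 0.55) * 25 = 13.1 MJ m-2 day-1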
def _sol_rad_from_t(self, coastal=False):
"""
Estimate incoming solar (or shortwave) radiation [Mj m-2 day-1] , *Rs*, (radiation hitting a horizontal
plane after scattering by the atmosphere) from min and max temperature together with an empirical adjustment
coefficient for 'interior' and 'coastal' regions.
The formula is based on equation 50 in Allen et al (1998) which is the Hargreaves radiation formula (Hargreaves
and Samani, 1982, 1985). This method should be used only when solar radiation or sunshine hours data are not
available. It is only recommended for locations where it is not possible to use radiation data from a regional
station (either because climate conditions are heterogeneous or data are lacking).
**NOTE**: this method is not suitable for island locations due to the
moderating effects of the surrounding water. """
# Determine value of adjustment coefficient [deg C-0.5] for
# coastal/interior locations
if coastal: # for 'coastal' locations, situated on or adjacent to the coast of a large
adj = 0.19 # land mass and where air masses are influenced by a nearby water body,
else: # for 'interior' locations, where land mass dominates and air
adj = 0.16 # masses are not strongly influenced by a large water body
et_rad = None
cs_rad = None
if 'et_rad' not in self.input:
et_rad = self._et_rad()
self.input['et_rad'] = et_rad
if 'cs_rad' not in self.input:
cs_rad = self._cs_rad()
self.input['cs_rad'] = cs_rad
sol_rad = np.multiply(adj, np.multiply(np.sqrt(np.subtract(self.input['tmax'].values,
self.input['tmin'].values)), et_rad))
# The solar radiation value is constrained by the clear sky radiation
return np.min(np.array([sol_rad, cs_rad]), axis=0)
def _cs_rad(self, method='asce'):
"""
Estimate clear sky radiation from altitude and extraterrestrial radiation.
Based on equation 37 in Allen et al (1998) which is recommended when calibrated Angstrom values are not
available. et_rad is Extraterrestrial radiation [MJ m-2 day-1]. Can be estimated using ``et_rad()``.
:return: Clear sky radiation [MJ m-2 day-1]
:rtype: float
"""
if method.upper() == 'ASCE':
return (0.00002 * self.cons['altitude'] + 0.75) * self._et_rad()
elif method.upper() == 'REFET':
sc = self.seasonal_correction  # a property, not a callable
_omega = omega(solar_time_rad(self.cons['long_dec_deg'], self.input['half_hour'], sc))
raise NotImplementedError("'refet' method for clear sky radiation is not fully implemented yet")
else:
raise ValueError("unknown method '{}' for clear sky radiation".format(method))
def daylight_fao56(self):
"""get number of maximum hours of sunlight for a given latitude using equation 34 in Fao56.
Annual variation of sunlight hours on earth is plotted in figure 14 in ref 1.
dr = pd.date_range('20110903 00:00', '20110903 23:59', freq='H')
sol_rad = np.array([0.45 for _ in range(len(dr))])
df = pd.DataFrame(np.stack([sol_rad],axis=1), columns=['sol_rad'], index=dr)
constants = {'lat' : -20}
units={'solar_rad': 'MegaJoulePerMeterSquarePerHour'}
eto = ReferenceET(df,units,constants=constants)
N = np.unique(eto.daylight_fao56())
array([11.66])
1) http://www.fao.org/3/X0490E/x0490e07.htm"""
ws = self.sunset_angle()
hrs = (24/3.14) * ws
# if self.input_freq == 'Monthly':
# df = pd.DataFrame(hrs, index=self.daily_index)
# hrs = df.resample('M').mean().values.reshape(-1,)
return hrs
def _et_rad(self):
"""
Estimate extraterrestrial radiation (*Ra*, 'top of the atmosphere radiation').
For daily, it is based on equation 21 in Allen et al (1998). If monthly mean radiation is required make
sure *sol_dec*. *sha* and *irl* have been calculated using the day of the year that corresponds to the middle
of the month.
**Note**: From Allen et al (1998): "For the winter months in latitudes greater than 55 degrees (N or S),
the equations have limited validity. Reference should be made to the Smithsonian Tables to assess possible
deviations."
:return: extraterrestrial radiation [MJ m-2 timestep-1]
:rtype: float
dr = pd.date_range('20110903 00:00', '20110903 23:59', freq='D')
sol_rad = np.array([0.45 ])
df = pd.DataFrame(np.stack([sol_rad],axis=1), columns=['sol_rad'], index=dr)
constants = {'lat' : -20}
units={'sol_rad': 'MegaJoulePerMeterSquarePerHour'}
eto = ReferenceET(df,units,constants=constants)
ra = eto._et_rad()
[32.27]
"""
if self.freq_in_mins < 1440: # TODO should sub_hourly be different from Hourly?
j = (3.14/180) * self.cons['lat_dec_deg'] # eq 22 phi
dr = self.inv_rel_dist_earth_sun() # eq 23
sol_dec = self.dec_angle() # eq 24 # gamma
w1, w2 = self.solar_time_angle()
t1 = (12*60)/math.pi
t2 = np.multiply(t1, np.multiply(SOLAR_CONSTANT, dr))
t3 = np.multiply(np.subtract(w2, w1), np.multiply(np.sin(j), np.sin(sol_dec)))
t4 = np.subtract(np.sin(w2), np.sin(w1))
t5 = np.multiply(np.multiply(np.cos(j), np.cos(sol_dec)), t4)
t6 = np.add(t5, t3)
ra = np.multiply(t2, t6) # eq 28
elif self.freq_in_mins == 1440: # daily frequency
sol_dec = self.dec_angle() # based on julian day
sha = self.sunset_angle() # sunset hour angle[radians], based on latitude
ird = self.inv_rel_dist_earth_sun()
tmp1 = (24.0 * 60.0) / math.pi
tmp2 = np.multiply(sha, np.multiply(math.sin(self.lat_rad), np.sin(sol_dec)))
tmp3 = np.multiply(math.cos(self.lat_rad), np.multiply(np.cos(sol_dec), np.sin(sha)))
ra = np.multiply(tmp1, np.multiply(SOLAR_CONSTANT, np.multiply(ird, np.add(tmp2, tmp3)))) # eq 21
else:
raise NotImplementedError
self.input['ra'] = ra
return ra
def sunset_angle(self):
"""
calculates sunset hour angle in radians given by Equation 25 in Fao56 (1)
1): http://www.fao.org/3/X0490E/x0490e07.htm"""
if 'sha' not in self.input:
j = (3.14/180.0) * self.cons['lat_dec_deg'] # eq 22
d = self.dec_angle() # eq 24, declination angle
angle = np.arccos(-np.tan(j)*np.tan(d)) # eq 25
self.input['sha'] = angle
else:
angle = self.input['sha'].values
return angle
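# worked example (sketch with assumed inputs): for lat_dec_deg = -20 and a declination of
# 0.120 rad, sha = arccos(-tan(-0.349) * tan(0.120)) ~= 1.527 rad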
def inv_rel_dist_earth_sun(self):
"""
Calculate the inverse relative distance between earth and sun from day of the year.
Based on FAO equation 23 in Allen et al (1998).
ird = 1.0 + 0.033 * cos( [2pi/365] * j )
:return: Inverse relative distance between earth and the sun
:rtype: np array
"""
if 'ird' not in self.input:
inv1 = np.multiply(2*math.pi/365.0, self.input['jday'].values)
inv2 = np.cos(inv1)
inv3 = np.multiply(0.033, inv2)
ird = np.add(1.0, inv3)
self.input['ird'] = ird
else:
ird = self.input['ird']
return ird
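# worked example (sketch): for jday = 246, ird = 1 + 0.033 * cos(2*pi*246/365) ~= 0.985,
# consistent with the FAO-56 example calculation for 3 September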
def dec_angle(self):
"""
finds solar declination angle
"""
if 'sol_dec' not in self.input:
if self.freq_str == 'Monthly':
solar_dec = np.array(0.409 * np.sin(2*3.14 * self.daily_index().dayofyear/365 - 1.39))
else:
solar_dec = 0.409 * np.sin(2*3.14 * self.input['jday'].values/365 - 1.39) # eq 24, declination angle
self.input['solar_dec'] = solar_dec
else:
solar_dec = self.input['solar_dec']
return solar_dec
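# worked example (sketch): for jday = 246, solar_dec = 0.409 * sin(2*pi*246/365 - 1.39) ~= 0.120 rad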
def solar_time_angle(self):
"""
returns solar time angle at start, mid and end of period using equation 29, 31 and 30 respectively in Fao
w = pi/12 [(t + 0.06667 ( lz-lm) + Sc) -12]
t =standard clock time at the midpoint of the period [hour]. For example for a period between 14.00 and 15.00
hours, t = 14.5
lm = longitude of the measurement site [degrees west of Greenwich]
lz = longitude of the centre of the local time zone [degrees west of Greenwich]
w1 = w - pi*t1/24
w2 = w + pi*t1/24
where:
w = solar time angle at midpoint of hourly or shorter period [rad]
t1 = length of the calculation period [hour]: i.e., 1 for hourly period or 0.5 for a 30-minute period
www.fao.org/3/X0490E/x0490e07.htm
"""
# TODO find out how to calculate lz
# https://github.com/djlampert/PyHSPF/blob/c3c123acf7dba62ed42336f43962a5e4db922422/src/pyhspf/preprocessing/etcalculator.py#L610
lz = np.abs(15 * round(self.cons['long_dec_deg'] / 15.0))
lm = np.abs(self.cons['long_dec_deg'])
t1 = 0.06667*(lz-lm)
t2 = self.input['half_hr'].values + t1 + self.solar_time_cor()
t3 = np.subtract(t2, 12)
w = np.multiply((math.pi/12.0), t3) # eq 31, in rad
w1 = np.subtract(w, np.divide(np.multiply(math.pi, self.input['t1']).values, 24.0)) # eq 29
w2 = np.add(w, np.divide(np.multiply(math.pi, self.input['t1']).values, 24.0)) # eq 30
return w1, w2
def solar_time_cor(self):
"""seasonal correction for solar time by implementation of eqation 32 in hour, `Sc`"""
upar = np.multiply((2*math.pi), np.subtract(self.input['jday'].values, 81))
b = np.divide(upar, 364) # eq 33
t1 = np.multiply(0.1645, np.sin(np.multiply(2, b)))
t2 = np.multiply(0.1255, np.cos(b))
t3 = np.multiply(0.025, np.sin(b))
return t1-t2-t3 # eq 32
def avp_from_rel_hum(self):
"""
Estimate actual vapour pressure (*ea*) from saturation vapour pressure and relative humidity.
Based on FAO equation 17 in Allen et al (1998).
ea = [ e_not(tmin)RHmax/100 + e_not(tmax)RHmin/100 ] / 2
uses Saturation vapour pressure at daily minimum temperature [kPa].
Saturation vapour pressure at daily maximum temperature [kPa].
Minimum relative humidity [%]
Maximum relative humidity [%]
:return: Actual vapour pressure [kPa]
:rtype: float
http://www.fao.org/3/X0490E/x0490e07.htm#TopOfPage
"""
if 'ea' in self.input:
avp = self.input['ea']
else:
avp = 0.0
# TODO `sub_hourly` calculation should be different from `Hourly`
# use equation 54 in http://www.fao.org/3/X0490E/x0490e08.htm#TopOfPage
if self.freq_in_mins <= 60: # for hourly or sub_hourly
avp = np.multiply(self.sat_vp_fao56(self.input['temp'].values),
np.divide(self.input['rel_hum'].values, 100.0))
elif self.freq_in_mins == 1440:
if 'rh_min' in self.input.columns and 'rh_max' in self.input.columns:
tmp1 = np.multiply(self.sat_vp_fao56(self.input['tmin'].values),
np.divide(self.input['rh_max'].values, 100.0))
tmp2 = np.multiply(self.sat_vp_fao56(self.input['tmax'].values),
np.divide(self.input['rh_min'].values, 100.0))
avp = np.divide(np.add(tmp1, tmp2), 2.0)
elif 'rel_hum' in self.input.columns:
# calculation actual vapor pressure from mean humidity
# equation 19
t1 = np.divide(self.input['rel_hum'].values, 100)
t2 = np.divide(np.add(self.sat_vp_fao56(self.input['tmax'].values),
self.sat_vp_fao56(self.input['tmin'].values)), 2.0)
avp = np.multiply(t1, t2)
else:
raise NotImplementedError(" for frequency of {} minutes, actual vapour pressure can not be calculated"
.format(self.freq_in_mins))
self.input['ea'] = avp
return avp
def sat_vp_fao56(self, temp):
"""calculates saturation vapor pressure (*e_not*) as given in eq 11 of FAO 56 at a given temp which must be in
units of centigrade.
using Tetens equation
es = 0.6108 * exp((17.27*temp)/(temp+237.3))
where es is in KiloPascal units.
Murray, F. W., On the computation of saturation vapor pressure, J. Appl. Meteorol., 6, 203-204, 1967.
"""
# e_not_t = multiply(0.6108, np.exp( multiply(17.26939, temp) / add(temp , 237.3)))
e_not_t = np.multiply(0.6108, np.exp(np.multiply(17.27, np.divide(temp, np.add(temp, 237.3)))))
return e_not_t
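# worked example (sketch): at temp = 25 degC, e_not = 0.6108 * exp(17.27*25 / (25 + 237.3)) ~= 3.17 kPa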
def soil_heat_flux(self, rn=None):
if self.freq_in_mins == 1440:
return 0.0
elif self.freq_in_mins <= 60:
gd = np.multiply(0.1, rn)
gn = np.multiply(0.5, rn)
return np.where(self.input['is_day'] == 1, gd, gn)
elif self.freq_in_mins > 1440:
raise NotImplementedError
def mean_sat_vp_fao56(self):
""" calculates mean saturation vapor pressure (*es*) for a day, weak or month according to eq 12 of FAO 56 using
tmin and tmax which must be in centigrade units
"""
es = None
# for case when tmax and tmin are not given and only `temp` is given
if 'tmax' not in self.input:
if 'temp' in self.input:
es = self.sat_vp_fao56(self.input['temp'])
# for case when `tmax` and `tmin` are provided
elif 'tmax' in self.input:
es_tmax = self.sat_vp_fao56(self.input['tmax'].values)
es_tmin = self.sat_vp_fao56(self.input['tmin'].values)
es = np.mean(np.array([es_tmin, es_tmax]), axis=0)
else:
raise NotImplementedError
return es
def psy_const(self) -> float:
"""
Calculate the psychrometric constant.
This method assumes that the air is saturated with water vapour at the minimum daily temperature. This
assumption may not hold in arid areas.
Based on equation 8, page 95 in Allen et al (1998).
uses Atmospheric pressure [kPa].
:return: Psychrometric constant [kPa degC-1].
:rtype: array
"""
return np.multiply(0.000665, self.atm_pressure())
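# worked example (sketch): at sea level (P ~= 101.3 kPa), gamma = 0.000665 * 101.3 ~= 0.067 kPa/degC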
def slope_sat_vp(self, t):
"""
slope of the relationship between saturation vapour pressure and temperature for a given temperature
according to equation 13 in Fao56[1].
delta = 4098 [0.6108 exp(17.27T/T+237.3)] / (T+237.3)^2
:param t: Air temperature [deg C]. Use mean air temperature for use in Penman-Monteith.
:return: Saturation vapour pressure [kPa degC-1]
[1]: http://www.fao.org/3/X0490E/x0490e07.htm#TopOfPage
"""
to_exp = np.divide(np.multiply(17.27, t), np.add(t, 237.3))
tmp = np.multiply(4098, np.multiply(0.6108, np.exp(to_exp)))
return np.divide(tmp, np.power(np.add(t, 237.3), 2))
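# worked example (sketch): at t = 25 degC, delta = 4098 * 3.17 / (25 + 237.3)**2 ~= 0.189 kPa/degC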
def _wind_2m(self, method='fao56', z_o=0.001):
"""
converts wind speed (m/s) measured at height z to 2m using either FAO 56 equation 47 or McMohan eq S4.4.
u2 = uz [ 4.87/ln(67.8z-5.42) ] eq 47 in [1], eq S5.20 in [2].
u2 = uz [ln(2/z_o) / ln(z/z_o)] eq S4.4 in [2]
:param `method` string, either of `fao56` or `mcmohan2013`. if `mcmohan2013` is chosen then `z_o` is used
:param `z_o` float, roughness height. Default value is from [2]
:return: Wind speed at 2 m above the surface [m s-1]
[1] http://www.fao.org/3/X0490E/x0490e07.htm
[2] McMahon, T., Peel, M., Lowe, L., Srikanthan, R. & McVicar, T. 2012. Estimating actual, potential,
reference crop and pan evaporation using standard meteorological data: a pragmatic synthesis. Hydrology and
Earth System Sciences Discussions, 9, 11829-11910.
https://www.hydrol-earth-syst-sci.net/17/1331/2013/hess-17-1331-2013-supplement.pdf
"""
# if value of height at which wind is measured is not given, then don't convert
if 'wind_z' in self.cons:
wind_z = self.cons['wind_z']
else:
wind_z = None
if wind_z is None:
if self.verbosity > 0:
print("""WARNING: givn wind data is not at 2 meter and `wind_z` is also not given. So assuming wind
given as measured at 2m height""")
return self.input['wind_speed'].values
else:
if method == 'fao56':
return np.multiply(self.input['wind_speed'], (4.87 / math.log((67.8 * wind_z) - 5.42)))
else:
return np.multiply(self.input['wind_speed'].values, math.log(2/z_o) / math.log(wind_z/z_o))
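# worked example (sketch): wind of 3.2 m/s measured at wind_z = 10 m gives, with the fao56 option,
# u2 = 3.2 * 4.87 / log(67.8*10 - 5.42) ~= 2.4 m/s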
def atm_pressure(self) -> float:
"""
Estimate atmospheric pressure from altitude.
Calculated using a simplification of the ideal gas law, assuming 20 degrees Celsius for a standard atmosphere.
Based on equation 7, page 62 in Allen et al (1998).
:return: atmospheric pressure [kPa]
:rtype: float
"""
tmp = (293.0 - (0.0065 * self.cons['altitude'])) / 293.0
return math.pow(tmp, 5.26) * 101.3
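# worked example (sketch): for altitude = 1800 m,
# P = 101.3 * ((293 - 0.0065*1800) / 293)**5.26 ~= 81.8 kPa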
def tdew_from_t_rel_hum(self):
"""
Calculates the dew point temperature given temperature and relative humidity.
Following formulation given at https://goodcalculators.com/dew-point-calculator/
The formula is
Tdew = (237.3 × [ln(RH/100) + ( (17.27×T) / (237.3+T) )]) / (17.27 - [ln(RH/100) + ( (17.27×T) / (237.3+T) )])
Where:
Tdew = dew point temperature in degrees Celsius (°C),
T = air temperature in degrees Celsius (°C),
RH = relative humidity (%),
ln = natural logarithm.
The formula also holds true as calculations shown at http://www.decatur.de/javascript/dew/index.html
"""
temp = self.input['temp']
neum = (237.3 * (np.log(self.input['rel_hum'] / 100.0) + ((17.27 * temp) / (237.3 + temp))))
denom = (17.27 - (np.log(self.input['rel_hum'] / 100.0) + ((17.27 * temp) / (237.3 + temp))))
td = neum / denom
self.input['tdew'] = td
return
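# worked example (sketch): for temp = 25 degC and rel_hum = 60 %,
# x = ln(0.6) + 17.27*25/(237.3+25) ~= 1.135 and tdew = 237.3*1.135 / (17.27 - 1.135) ~= 16.7 degC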
def evap_pan(self):
"""
pan evaporation which is used in almost all penman related methods
"""
ap = self.cons['pen_ap']
lat = self.cons['lat_dec_deg']
rs = self.rs()
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
vabar = self.avp_from_rel_hum() # Vapour pressure
vas = self.mean_sat_vp_fao56()
u2 = self._wind_2m()
r_nl = self.net_out_lw_rad(rs=rs, ea=vabar) # net outgoing longwave radiation
ra = self._et_rad()
# eq 34 in Thom et al., 1981
f_pan_u = np.add(1.201, np.multiply(1.621, u2))
# eq 4 and 5 in Rotstayn et al., 2006
p_rad = np.add(1.32, np.add(np.multiply(4e-4, lat), np.multiply(8e-5, lat**2)))
f_dir = np.add(-0.11, np.multiply(1.31, np.divide(rs, ra)))
rs_pan = np.multiply(np.add(np.add(np.multiply(f_dir, p_rad), np.multiply(1.42,
np.subtract(1, f_dir))),
np.multiply(0.42, self.cons['albedo'])), rs)
rn_pan = np.subtract(np.multiply(1-self.cons['alphaA'], rs_pan), r_nl)
# S6.1 in McMohan et al 2013
tmp1 = np.multiply(np.divide(delta, np.add(delta, np.multiply(ap, gamma))), np.divide(rn_pan, LAMBDA))
tmp2 = np.divide(np.multiply(ap, gamma), np.add(delta, np.multiply(ap, gamma)))
tmp3 = np.multiply(f_pan_u, np.subtract(vas, vabar))
tmp4 = np.multiply(tmp2, tmp3)
epan = np.add(tmp1, tmp4)
return epan
def rad_to_evap(self):
"""
converts solar radiation to equivalent inches of water evaporation
SRadIn[in/day] = SolRad[Langleys/day] / ((597.3 - 0.57 * temp[centigrade]) * 2.54) [1]
or using equation 20 of FAO chapter 3
from TABLE 3 in FAO chap 3.
SRadIn[mm/day] = 0.408 * Radiation[MJ m-2 day-1]
SRadIn[mm/day] = 0.035 * Radiation[Wm-2]
SRadIn[mm/day] = Radiation[MJ m-2 day-1] / 2.45
SRadIn[mm/day] = Radiation[J cm-2 day-1] / 245
SRadIn[mm/day] = Radiation[Wm-2] / 28.4
[1] https://github.com/respec/BASINS/blob/4356aa9481eb7217cb2cbc5131a0b80a932907bf/atcMetCmp/modMetCompute.vb#L1251
https://github.com/DanluGuo/Evapotranspiration/blob/8efa0a2268a3c9fedac56594b28ac4b5197ea3fe/R/Evapotranspiration.R
http://www.fao.org/3/X0490E/x0490e07.htm
"""
# TODO following equation assumes radiation in langleys/day and output in inches
tmp1 = np.multiply(np.subtract(597.3, np.multiply(0.57, self.input['temp'].values)), 2.54)
rad_in = np.divide(self.input['sol_rad'].values, tmp1)
return rad_in
def equil_temp(self, et_daily):
# equilibrium temperature T_e
t_e = self.input['temp'].copy()
ta = self.input['temp']
vabar = self.avp_from_rel_hum()
r_n = self.net_rad(vabar) # net radiation
gamma = self.psy_const()
for i in range(9999):
v_e = 0.6108 * np.exp(17.27 * t_e/(t_e + 237.3)) # saturated vapour pressure at T_e (S2.5)
t_e_new = ta - 1 / gamma * (1 - r_n / (LAMBDA * et_daily)) * (v_e - vabar) # rearranged from S8.8
delta_t_e = t_e_new - t_e
maxdelta_t_e = np.abs(np.max(delta_t_e))
t_e = t_e_new
if maxdelta_t_e < 0.01:
break
return t_e
def freq_in_mins_from_string(input_string: str) -> int:
if has_numbers(input_string):
in_minutes = split_freq(input_string)
elif input_string.upper() in ['D', 'H', 'M', 'DAILY', 'HOURLY', 'MONTHLY', 'YEARLY', 'MIN', 'MINUTE']:
in_minutes = str_to_mins(input_string.upper())
else:
raise TypeError("invalid input string", input_string)
return int(in_minutes)
def str_to_mins(input_string: str) -> int:
d = {'MIN': 1,
'MINUTE': 1,
'DAILY': 1440,
'D': 1440,
'HOURLY': 60,
'HOUR': 60,
'H': 60,
'MONTHLY': 43200,
'M': 43200,
'YEARLY': 525600
}
return d[input_string]
def split_freq(freq_str: str) -> int:
match = re.match(r"([0-9]+)([a-z]+)", freq_str, re.I)
if match:
minutes, freq = match.groups()
if freq.upper() in ['H', 'HOURLY', 'HOURS', 'HOUR']:
minutes = int(minutes) * 60
elif freq.upper() in ['D', 'DAILY', 'DAY', 'DAYS']:
minutes = int(minutes) * 1440
return int(minutes)
else:
raise NotImplementedError
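# usage sketch: split_freq('6H') -> 360, split_freq('2D') -> 2880, split_freq('30min') -> 30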
def has_numbers(input_string: str) -> bool:
return bool(re.search(r'\d', input_string))
def justify_len(string: str, length: int = 2, pad: str = '0') -> str:
if len(string) < length:
zeros_to_pad = pad * (length - len(string))
new_string = zeros_to_pad + string
else:
new_string = string
return new_string
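# usage sketch: justify_len('7') -> '07', justify_len('12') -> '12'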
def add_freq(dataframe, name=None, _force_freq=None, method=None):
"""Add a frequency attribute to idx, through inference or directly.
Returns a copy. If `freq` is None, it is inferred.
"""
idx = dataframe.index
idx = idx.copy()
# if freq is None:
if idx.freq is None:
freq = pd.infer_freq(idx)
idx.freq = freq
if idx.freq is None:
if _force_freq is not None:
dataframe = force_freq(dataframe, _force_freq, name, method=method)
else:
raise AttributeError('no discernible frequency found in data for {}. Specify'
' a frequency string with `freq`.'.format(name))
else:
print('frequency {} is assigned to {}'.format(idx.freq, name))
dataframe.index = idx
return dataframe
def force_freq(data_frame, freq_to_force, name, method=None):
old_nan_counts = data_frame.isna().sum()
old_shape = data_frame.shape
dr = pd.date_range(data_frame.index[0], data_frame.index[-1], freq=freq_to_force)
df_unique = data_frame[~data_frame.index.duplicated(keep='first')] # first remove duplicate indices if present
if method:
df_idx_sorted = df_unique.sort_index()
df_reindexed = df_idx_sorted.reindex(dr, method='nearest')
else:
df_reindexed = df_unique.reindex(dr, fill_value=np.nan)
df_reindexed.index.freq = pd.infer_freq(df_reindexed.index)
new_nan_counts = df_reindexed.isna().sum()
print('Frequency {} is forced on dataframe {}, NaN counts changed from {} to {}, shape changed from {} to {}'
.format(df_reindexed.index.freq, name, old_nan_counts.values, new_nan_counts.values,
old_shape, df_reindexed.shape))
return df_reindexed
def min_to_str(minutes: int) -> str:
if minutes == 1:
freq_str = 'Minute'
elif 60 > minutes > 1:
freq_str = 'Sub_hourly'
elif minutes == 60:
freq_str = 'Hourly'
elif 1440 > minutes > 60:
freq_str = 'Sub-daily'
elif minutes == 1440:
freq_str = 'Daily'
elif 43200 > minutes > 1440:
freq_str = 'Sub-monthly'
elif minutes == 43200:
freq_str = 'Monthly'
elif 525600 > minutes > 43200:
freq_str = 'Sub-yearly'
elif minutes == 525600:
freq_str = 'Yearly'
else:
raise ValueError("Can not calculate frequency string from given frequency in minutes ", minutes)
return freq_str
time_step = {'D': 'Day', 'H': 'Hour', 'M': 'MonthEnd'}
def get_offset(freqstr: str) -> str:
offset_step = 1
if freqstr in time_step:
freqstr = time_step[freqstr]
elif has_numbers(freqstr):
in_minutes = split_freq(freqstr)
freqstr = 'Minute'
offset_step = int(in_minutes)
offset = getattr(pd.offsets, freqstr)(offset_step)
return offset
def _wrap(x, x_min, x_max):
"""Wrap floating point values into range
Parameters
----------
x : ndarray
Values to wrap.
x_min : float
Minimum value in output range.
x_max : float
Maximum value in output range.
Returns
-------
ndarray
"""
return np.mod((x - x_min), (x_max - x_min)) + x_min
def omega(solar_time):
"""Solar hour angle (Eq. 55)
Parameters
----------
solar_time : scalar or array_like of shape(M, )
Solar time (i.e. noon is 0) [hours].
Returns
-------
omega : ndarray
Hour angle [radians].
"""
_omega = (2 * math.pi / 24.0) * solar_time
# Need to adjust omega so that the values go from -pi to pi
# Values outside this range are wrapped (i.e. -3*pi/2 -> pi/2)
_omega = _wrap(_omega, -math.pi, math.pi)
return _omega
def solar_time_rad(lon, time_mid, sc):
"""Solar time (i.e. noon is 0) (Eq. 55)
Parameters
----------
lon : scalar or array_like of shape(M, )
Longitude [radians].
time_mid : scalar or array_like of shape(M, )
UTC time at midpoint of period [hours].
sc : scalar or array_like of shape(M, )
Seasonal correction [hours].
Returns
-------
ndarray
Solar time [hours].
Notes
-----
This function could be integrated into the _omega() function since they are
always called together (i.e. _omega(_solar_time_rad()). It was built
independently from _omega to eventually support having a separate
solar_time functions for longitude in degrees.
"""
return time_mid + (lon * 24 / (2 * math.pi)) + sc - 12 | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/et/utils.py | utils.py |
from .utils import Utils
from .global_variables import LAMBDA
from ai4water.backend import np, pd
# TODO, classify methods which require wind_speed, or which require solar_rad.
class ETBase(Utils):
"""
This is the base class for evapotranspiration calculation. It calculates
etp according to Jensen and Haise_ method. Any new ETP calculation must
inherit from it and must implement
the ``__call__`` method.
.. _Haise:
https://doi.org/10.1061/JRCEA4.0000287
"""
def __init__(self,
input_df: pd.DataFrame,
units:dict,
constants:dict,
**kwargs
):
"""
Parameters
---------
input_df :
units :
constants :
kwargs :
"""
self.name = self.__class__.__name__
super(ETBase, self).__init__(input_df.copy(),
units.copy(),
constants.copy(),
**kwargs)
def requirements(self, **kwargs):
if 'constants' in kwargs:
constants = kwargs['constants']
else:
constants = ['lat_dec_deg', 'altitude', 'ct', 'tx']
if 'ts' in kwargs:
ts = kwargs['ts']
else:
ts = ['temp']
for cons in constants:
if cons not in self.cons:
if cons in self.default_cons:
val = self.default_cons[cons]['def_val']
desc = self.default_cons[cons]['desc']
if val is not None:
print("Warning: default value {} of parameter {} which is {} is being used".format(val,
cons,
desc))
self.cons[cons] = val
else:
raise ValueError("Value of constant {} must be provided to calculate ETP using {}"
.format(cons, self.name))
for _ts in ts:
if _ts not in self.input.columns:
raise ValueError("Timeseries {} is required for calculation of ETP using {}"
.format(_ts, self.name))
def __call__(self, *args,
transform: bool=False,
**kwargs):
"""
as given (eq 9) in Xu and Singh, 2000 and implemented here_
uses: a_s, b_s, ct=0.025, tx=-3
Arguments:
transform : whether to transform the calculated etp to frequecies
other than at which it is calculated.
.. _here:
https://github.com/DanluGuo/Evapotranspiration/blob/8efa0a2268a3c9fedac56594b28ac4b5197ea3fe/R/Evapotranspiration.R#L2734
"""
self.requirements(constants=['lat_dec_deg', 'altitude', 'ct', 'tx'],
ts=['temp'])
rs = self.rs()
tmp1 = np.multiply(np.multiply(self.cons['ct'], np.add(self.input['temp'], self.cons['tx'])), rs)
et = np.divide(tmp1, LAMBDA)
self.post_process(et, transform=transform)
return et
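# worked example (a sketch with assumed inputs, taking LAMBDA ~= 2.45 MJ/kg): with ct = 0.025,
# tx = -3, temp = 25 degC and rs = 20 MJ m-2 day-1,
# et = 0.025 * (25 - 3) * 20 / 2.45 ~= 4.5 mm/day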
def post_process(self, et, transform=False):
if isinstance(et, np.ndarray):
et = pd.Series(et, index=self.input.index)
self.output['et_' + self.name + '_' + self.freq_str] = et
if transform:
self.transform_etp(self.name)
def summary(self):
methods_evaluated = []
for m in self.output.keys():
if 'Hourly' in m:
methods_evaluated.append(m)
for m in methods_evaluated:
ts = self.output[m]
yrs = np.unique(ts.index.year)
print('For {} \n'.format(m.split('_')[1]))
for yr in yrs:
st, en = str(yr) + '0101', str(yr) + '1231'
yr_ts = ts[st:en]
yr_sum = yr_ts.sum().values[0]
yr_mean = yr_ts.mean().values[0]
print('for year {}:, sum: {:<10.1f} mean: {:<10.1f}'.format(yr, yr_sum, yr_mean))
class Abtew(ETBase):
"""
daily etp using equation 3 in Abtew_ 1996. `k` is a dimensionless coefficient.
uses: , k=0.52, a_s=0.23, b_s=0.5
:param `k` coefficient, default value taken from [1]
:param `a_s` fraction of extraterrestrial radiation reaching earth on sunless days
:param `b_s` difference between fracion of extraterrestrial radiation reaching full-sun days
and that on sunless days.
.. _Abtew:
https://doi.org/10.1111/j.1752-1688.1996.tb04044.x
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'abtew_k'])
rs = self.rs()
et = np.multiply(self.cons['abtew_k'], np.divide(rs, LAMBDA))
self.post_process(et, kwargs.get('transform', False))
return et
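# worked example (sketch, assuming abtew_k = 0.52 and LAMBDA ~= 2.45 MJ/kg): with
# rs = 20 MJ m-2 day-1, et = 0.52 * 20 / 2.45 ~= 4.2 mm/day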
class Albrecht(ETBase):
"""
Developed in Germany by Albrecht, 1950. Djaman et al., 2016 wrote the formula as
eto = (0.1005 + 0.297 * u2) * (es - ea)
"""
def __call__(self, *args, **kwargs):
# Mean saturation vapour pressure
if 'es' not in self.input:
if self.freq_str == 'Daily':
es = self.mean_sat_vp_fao56()
elif self.freq_str == 'Hourly':
es = self.sat_vp_fao56(self.input['temp'].values)
elif self.freq_str == 'Sub_hourly': # TODO should sub-hourly be same as hourly?
es = self.sat_vp_fao56(self.input['temp'].values)
else:
raise NotImplementedError
else:
es = self.input['es']
# actual vapour pressure
ea = self.avp_from_rel_hum()
u2 = self._wind_2m()
eto = (0.1005 + 0.297 * u2) * (es - ea)
self.post_process(eto, kwargs.get('transform', False))
return eto
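# worked example (sketch with assumed inputs): with u2 = 2 m/s and a vapour pressure deficit
# (es - ea) of 1.0 kPa, eto = (0.1005 + 0.297*2) * 1.0 ~= 0.69 mm/day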
class BlaneyCriddle(ETBase):
"""
using formulation of Blaney-Criddle for daily reference crop ETP using monthly
mean tmin and tmax. Inaccurate under extreme climates. underestimates in
windy, dry and sunny conditions and overestimates under calm, humid and
clouded conditions.
Doorenbos, J., & Pruitt, W. O. (1977). Crop water requirements, FAO
Irrigation and Drainage. Paper 24, 2a ed., Roma, Italy.
"""
def __call__(self, *args, **kwargs):
# TODO include modified BlaneyCriddle as introduced in [3]
self.requirements(constants=['e0', 'e1', 'e2', 'e3', 'e4', 'e5']) # check that all constants are present
N = self.daylight_fao56() # mean daily percentage of annual daytime hours
u2 = self._wind_2m()
rh_min = self.input['rh_min']
n = self.input['sunshine_hrs']
ta = self.input['temp'].values
# undefined working variable (Allena and Pruitt, 1986; Shuttleworth, 1992) (S9.8)
a1 = self.cons['e0'] + self.cons['e1'] * rh_min + self.cons['e2'] * n / N
a2 = self.cons['e3'] * u2
a3 = self.cons['e4'] * rh_min * n / N + self.cons['e5'] * rh_min * u2
bvar = a1 + a2 + a3
# calculate yearly sum of daylight hours and assign that value to each point in array `N`
n_annual = assign_yearly(N, self.input.index)
# percentage of actual daytime hours for the day comparing to the annual sum of maximum sunshine hours
p_y = 100 * n / n_annual['N'].values
# reference crop evapotranspiration
et = (0.0043 * rh_min - n / N - 1.41) + bvar * p_y * (0.46 * ta + 8.13)
self.post_process(et, kwargs.get('transform', False))
return et
class BrutsaertStrickler(ETBase):
"""
using formulation given by BrutsaertStrickler
:param `alpha_pt` Priestley-Taylor coefficient = 1.26 for Priestley-Taylor_
model (Priestley and Taylor, 1972)
:param `a_s` fraction of extraterrestrial radiation reaching earth on sunless days
:param `b_s` difference between fraction of extraterrestrial radiation reaching full-sun days
and that on sunless days.
:param `albedo` Any numeric value between 0 and 1 (dimensionless), albedo of the evaporative surface
representing the portion of the incident radiation that is reflected back at the surface.
Default is 0.23 for surface covered with short reference crop.
:return: et
.. _Priestley-Taylor:
https://doi.org/10.1029/WR015i002p00443
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['alphaPT']) # check that all constants are present
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
vabar = self.avp_from_rel_hum() # Vapour pressure, *ea*
vas = self.mean_sat_vp_fao56()
u2 = self._wind_2m()
f_u2 = np.add(2.626, np.multiply(1.381, u2))
r_ng = self.net_rad(vabar)
alpha_pt = self.cons['alphaPT']
et = np.subtract(np.multiply(np.multiply((2*alpha_pt-1),
np.divide(delta, np.add(delta, gamma))),
np.divide(r_ng, LAMBDA)),
np.multiply(np.multiply(np.divide(gamma, np.add(delta, gamma)), f_u2),
np.subtract(vas, vabar)))
self.post_process(et, kwargs.get('transform', False))
return et
class Camargo(ETBase):
"""
Originally presented by Camargo, 1971. Following formula is presented in
Fernandes et al., 2012 quoting Sedyiama et al., 1997.
eto = f * Tmean * ra * nd
Gurski et al., 2018 did not include nd in the formula. They expressed the conversion of
extra-terrestrial radiation into equivalent mm/day as
ra[mm/day] = ra[MegaJoulePerMeterSquare PerDay] / 2.45
where 2.45 is constant.
eto: reference etp in mm/day.
f: an empirical factor taken as 0.01
ra: extraterrestrial radiation expressed as mm/day
nd: length of time interval
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['f_camargo']) # check that all constants are present
ra = self._et_rad()
if self.freq_str == 'Daily':
ra = ra/2.45
else:
raise NotImplementedError
et = self.cons['f_camargo'] * self.input['temp'] * ra
self.post_process(et, kwargs.get('transform', False))
return et
class Caprio(ETBase):
"""
Developed by Caprio (1974). Pandey et al 2016 wrote the equation as
eto = (0.01092708*t + 0.0060706) * rs
"""
def __call__(self, *args, **kwargs):
rs = self.rs()
eto = (0.01092708 * self.input['temp'] + 0.0060706) * rs
self.post_process(eto, kwargs.get('transform', False))
return eto
class ChapmanAustralia(ETBase):
"""using formulation of Chapman_, 2001,
uses: a_s=0.23, b_s=0.5, ap=2.4, alphaA=0.14, albedo=0.23
.. _Chapman:
https://116.90.59.164/MODSIM03/Volume_01/A03/04_Chapman.pdf
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'alphaA', 'pan_ap', 'albedo'],
ts=['temp'])
lat = self.cons['lat_dec_deg']
a_p = 0.17 + 0.011 * abs(lat)
b_p = np.power(10, (0.66 - 0.211 * abs(lat))) # constants (S13.3)
epan = self.evap_pan()
et = np.add(np.multiply(a_p, epan), b_p)
self.post_process(et, kwargs.get('transform', False))
return et
class Copais(ETBase):
"""
Developed for central Greece by Alexandris et al 2006 and used in Alexandris et al 2008.
"""
def __call__(self, *args, **kwargs):
et = None
self.post_process(et, kwargs.get('transform', False))
return et
class Dalton(ETBase):
"""
using Dalton formulation as mentioned here_ in mm/dday
uses:
es: mean saturation vapour pressure
ea: actual vapour pressure
u2: wind speed
.. _here:
https://water-for-africa.org/en/dalton.html
"""
def __call__(self, *args, **kwargs):
u2 = self._wind_2m()
fau = 0.13 + 0.14 * u2
# Mean saturation vapour pressure
if 'es' not in self.input:
if self.freq_str == 'Daily':
es = self.mean_sat_vp_fao56()
elif self.freq_str == 'Hourly':
es = self.sat_vp_fao56(self.input['temp'].values)
elif self.freq_str == 'Sub_hourly': # TODO should sub-hourly be same as hourly?
es = self.sat_vp_fao56(self.input['temp'].values)
else:
raise NotImplementedError
else:
es = self.input['es']
# actual vapour pressure
ea = self.avp_from_rel_hum()
if 'vp_def' not in self.input:
vp_d = es - ea # vapor pressure deficit
else:
vp_d = self.input['vp_def']
etp = fau * vp_d
self.post_process(etp, kwargs.get('transform', False))
return etp
class DeBruinKeijman(ETBase):
"""
Calculates daily Pot ETP, developed by deBruin and Jeijman 1979 and used in Rosenberry et al 2004.
"""
class DoorenbosPruitt(ETBase):
"""
Developed by Doorenbos and Pruitt (1977); Poyen et al. wrote the following equation
et = a(delta/(delta+gamma) * rs) + b
b = -0.3
a = 1.066 - 0.13x10^{-2} * rh + 0.045*ud - 0.2x10^{-3}*rh * ud - 0.315x10^{-4}*rh**2 - 0.11x10^{-2}*ud**2
used in Xu HP 2000.
"""
class GrangerGray(ETBase):
"""
using formulation of Granger & Gray 1989 which is for non-saturated lands and modified form of penman 1948.
uses: , wind_f`='pen48', a_s=0.23, b_s=0.5, albedo=0.23
:param `wind_f` str, if 'pen48' is used then formulation of [1] is used otherwise formulation of [3] requires
wind_f to be 2.626.
:param `a_s` fraction of extraterrestrial radiation reaching earth on sunless days
:param `b_s` difference between fraction of extraterrestrial radiation reaching full-sun days
and that on sunless days.
:param `albedo` Any numeric value between 0 and 1 (dimensionless), albedo of the evaporative surface
representing the portion of the incident radiation that is reflected back at the surface.
Default is 0.23 for surface covered with short reference crop.
:return:
https://doi.org/10.1016/0022-1694(89)90249-7
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['wind_f']) # check that all constants are present
if self.cons['wind_f'] not in ['pen48', 'pen56']:
raise ValueError('value of given wind_f is not allowed.')
if self.cons['wind_f'] == 'pen48':
_a = 2.626
_b = 0.09
else:
_a = 1.313
_b = 0.06
# rs = self.rs()
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
vabar = self.avp_from_rel_hum() # Vapour pressure
r_n = self.net_rad(vabar) # net radiation
vas = self.mean_sat_vp_fao56()
u2 = self._wind_2m()
fau = _a + 1.381 * u2
ea = np.multiply(fau, np.subtract(vas, vabar))
# dimensionless relative drying power eq 7 in Granger, 1998
dry_pow = np.divide(ea, np.add(ea, np.divide(np.subtract(r_n, self.soil_heat_flux()), LAMBDA)))
# eq 6 in Granger, 1998
g_g = 1 / (0.793 + 0.20 * np.exp(4.902 * dry_pow)) + 0.006 * dry_pow
tmp1 = np.divide(np.multiply(delta, g_g), np.add(np.multiply(delta, g_g), gamma))
tmp2 = np.divide(np.subtract(r_n, self.soil_heat_flux()), LAMBDA)
tmp3 = np.multiply(np.divide(np.multiply(gamma, g_g), np.add(np.multiply(delta, g_g), gamma)), ea)
et = np.add(np.multiply(tmp1, tmp2), tmp3)
self.post_process(et, kwargs.get('transform', False))
return et
class Hamon(ETBase):
"""calculates evapotranspiration in mm using Hamon 1963 method as given in Lu et al 2005. It uses daily mean
temperature which can also be calculated
from daily max and min temperatures. It also requires `daylight_hrs` which is hours of day light, which if not
provided as input, will be calculated from latitutde. This means if `daylight_hrs` timeseries is not provided as
input, then argument `lat` must be provided.
pet = cts * n * n * vdsat
vdsat = (216.7 * vpsat) / (tavc + 273.3)
vpsat = 6.108 * exp((17.26939 * tavc)/(tavc + 237.3))
:uses cts: float, or array of 12 values for each month of year or a time series of equal length as input data.
if it is float, then that value will be considered for whole year. Default value of 0.0055 was used
by Hamon 1961, although he later used different value but I am using same value as it is used by
WDMUtil. It should be also noted that 0.0055 is to be used when pet is in inches. So I am dividing
the whole pet by 24.5 in order to convert from inches to mm while still using 0.0055.
References
----------
Hamon, W. R. (1963). Computation of direct runoff amounts from storm rainfall. International Association of
Scientific Hydrology Publication, 63, 52-62.
Lu et al. (2005). A comparison of six potential evaportranspiration methods for regional use in the
southeastern United States. Journal of the American Water Resources Association, 41, 621-633.
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'albedo', 'cts'],
ts=['temp'])
# allow cts to be provided as input while calling method, e.g we may want to use array
if 'cts' in kwargs:
cts = kwargs['cts']
else:
cts = self.cons['cts']
if 'sunshine_hrs' not in self.input.columns:
if 'daylight_hrs' not in self.input.columns:
daylight_hrs = self.daylight_fao56()
else:
daylight_hrs = self.input['daylight_hrs']
sunshine_hrs = daylight_hrs
print('Warning: sunshine hours are considered equal to daylight hours')
else:
sunshine_hrs = self.input['sunshine_hrs']
sunshine_hrs = np.divide(sunshine_hrs, 12.0)
# preference should be given to tmin and tmax if provided and if tmin, tmax is not provided then use temp which
# is mean temperature. This is because in original equations, vd_sat is calculated as average of max vapour
pressure and minimum vapour pressure.
if 'tmax' not in self.input.columns:
if 'temp' not in self.input.columns:
raise ValueError('Either tmax and tmin or mean temperature should be provided as input')
else:
vd_sat = self.sat_vp_fao56(self.input['temp'])
else:
vd_sat = self.mean_sat_vp_fao56()
# in some literature, the equation is divided by 100 but then the cts value is 0.55 instead of 0.0055
et = cts * 25.4 * np.power(sunshine_hrs, 2) * (216.7 * vd_sat * 10 / (np.add(self.input['temp'], 273.3)))
self.post_process(et, kwargs.get('transform', False))
return et
class HargreavesSamani(ETBase):
"""
estimates daily ETo using Hargreaves method Hargreaves and Samani_, 1985.
:uses
temp
tmin
tmax
:param
method: str, if `1985`, then the method of 1985 (Hargreaves and Samani, 1985) is followed, as calculated by and
mentioned in Hargreaves and Allen, 2003.
if `2003`, then the formula mentioned in [1] is used
Note: Current test passes for 1985 method.
There is a variation of Hargreaves introduced by Trajkovic 2007 as mentioned in Alexandris 2008.
.. _Samani:
https://rdrr.io/cran/Evapotranspiration/man/ET.HargreavesSamani.html
"""
def __call__(self, method='1985', **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'albedo'],
ts=['temp'])
if method == '2003':
tmp1 = np.multiply(0.0023, np.add(self.input['temp'], 17.8))
tmp2 = np.power(np.subtract(self.input['tmax'].values, self.input['tmin'].values), 0.5)
tmp3 = np.multiply(0.408, self._et_rad())
et = np.multiply(np.multiply(tmp1, tmp2), tmp3)
else:
ra_my = self._et_rad()
tmin = self.input['tmin'].values
tmax = self.input['tmax'].values
ta = self.input['temp'].values
# empirical coefficient by Hargreaves and Samani (1985) (S9.13)
c_hs = 0.00185 * np.power((np.subtract(tmax, tmin)), 2) - 0.0433 * (np.subtract(tmax, tmin)) + 0.4023
et = 0.0135 * c_hs * ra_my / LAMBDA * np.power((np.subtract(tmax, tmin)), 0.5) * (np.add(ta, 17.8))
self.post_process(et, kwargs.get('transform', False))
return et
class Haude(ETBase):
"""
only requires air temp and relative humidity at 2:00 pm. Good for moderate
zones despite being simple
References
----------
[1]. Haude, W. (1954). Zur praktischen Bestimmung der aktuellen und potentiellen Evaporation und Evapotranspiration.
Schweinfurter Dr. und Verlag-Ges..
"""
def __call__(self, *args, **kwargs):
etp = None # etp = f_mon * (6.11 * 10**(7.48 * T / (237 + T)) - rf * es); not implemented yet
self.post_process(etp, kwargs.get('transform', False))
return etp
class JensenHaiseBasins(ETBase):
"""
This method generates daily pan evaporation (inches) using a coefficient for the month `cts`, the daily
average air temperature (F), a coefficient `ctx`, and solar radiation (langleys/day) as givn in
BASINS program[2].
The computations are
based on the Jensen and Haise (1963) formula.
PET = CTS * (TAVF - CTX) * RIN
where
PET = daily potential evapotranspiration (in)
CTS = monthly variable coefficient
TAVF = mean daily air temperature (F), computed from max-min
CTX = coefficient
RIN = daily solar radiation expressed in inches of evaporation
RIN = SWRD/(597.3 - (.57 * TAVC)) * 2.54
where
SWRD = daily solar radiation (langleys)
TAVC = mean daily air temperature (C)
:uses cts float or array like. Value of monthly coefficient `cts` to be used. If float, then same value is
assumed for all months. If array like then it must be of length 12.
:uses ctx `float` constant coefficient value of `ctx` to be used in Jensen and Haise formulation.
"""
def __call__(self, *args, **kwargs):
if 'cts_jh' in kwargs:
cts = kwargs['cts_jh']
else:
cts = self.cons['cts_jh']
if 'ctx_jh' in kwargs:
ctx = kwargs['ctx_jh']
else:
ctx = self.cons['ctx_jh']
if not isinstance(cts, float):
if not isinstance(np.array(cts), np.ndarray):
raise ValueError('cts must be array like')
else: # if cts is array like it must be given for 12 months of year, not more not less
if len(np.array(cts)) > 12:
raise ValueError('cts must be of length 12')
else: # if only one value is given for all months distribute it as monthly value
cts = np.array([cts for _ in range(12)])
if not isinstance(ctx, float):
raise ValueError('ctx must be float')
# distributing cts values for all dates of input data
self.input['cts'] = np.nan
for m, i in zip(self.input.index.month, self.input.index):
for _m in range(m):
self.input.at[i, 'cts'] = cts[_m]
cts = self.input['cts']
taf = self.input['temp'].values
rad_in = self.rad_to_evap()
pan_evp = np.multiply(np.multiply(cts, np.subtract(taf, ctx)), rad_in)
et = np.where(pan_evp < 0.0, 0.0, pan_evp)
self.post_process(et, kwargs.get('transform', False))
return et
class Kharrufa(ETBase):
"""
For monthly potential evapotranspiration estimation, originally presented by Kharrufa, 1985. Xu and Singh, 2001
presented following formula:
et = 0.34 * p * Tmean**1.3
et: pot. evapotranspiration in mm/month.
Tmean: Average temperature in Degree Centigrade
p: percentage of total daytime hours for the period used (daily or monthly) out of total daytime hours of the
year (365 * 12)
Kharrufa, N. S. (1985). Simplified equation for evapotranspiration in arid regions. Beitrage zur
Hydrologie, 5(1), 39-47.
"""
def __call__(self, *args, **kwargs):
ta = self.input['temp']
N = self.daylight_fao56() # mean daily percentage of annual daytime hours
n_annual = assign_yearly(N, self.input.index)
et = 0.34 * n_annual['N'].values * ta**1.3
self.post_process(et, kwargs.get('transform', False))
return et
class Linacre(ETBase):
"""
using formulation of Linacre 1977 who simplified Penman method.
:uses
temp
tdew/rel_hum
https://doi.org/10.1016/0002-1571(77)90007-3
"""
def __call__(self, *args, **kwargs):
if 'tdew' not in self.input:
if 'rel_hum' in self.input:
self.tdew_from_t_rel_hum()
tm = np.add(self.input['temp'].values, np.multiply(0.006, self.cons['altitude']))
tmp1 = np.multiply(500, np.divide(tm, 100 - self.cons['lat_dec_deg']))
tmp2 = np.multiply(15, np.subtract(self.input['temp'].values, self.input['tdew'].values))
upar = np.add(tmp1, tmp2)
et = np.divide(upar, np.subtract(80, self.input['temp'].values))
self.post_process(et, kwargs.get('transform', False))
return et
class Makkink(ETBase):
"""
:uses
a_s, b_s
temp
solar_rad
using formulation of Makkink
"""
def __call__(self, *args, **kwargs):
rs = self.rs()
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
et = np.subtract(np.multiply(np.multiply(0.61, np.divide(delta, np.add(delta, gamma))),
np.divide(rs, 2.45)), 0.12)
self.post_process(et, kwargs.get('transform', False))
return et
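# worked example (sketch with assumed inputs): with delta = 0.189, gamma = 0.067 and
# rs = 20 MJ m-2 day-1, et = 0.61 * (0.189/0.256) * (20/2.45) - 0.12 ~= 3.6 mm/day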
class Irmak(ETBase):
"""
Pandey et al 2016, presented 3 formulas for Irmak.
1 eto = -0.611 + 0.149 * rs + 0.079 * t
2 eto = -0.642 + 0.174 * rs + 0.0353 * t
3 eto = -0.478 + 0.156 * rs - 0.0112 * tmax + 0.0733 * tmin
References:
Irmak 2003
Tabari et al 2011
Pandey et al 2016
"""
class Mahringer(ETBase):
"""
Developed by Mahringer in Germany. [1] Wrote formula as
eto = 0.15072 * sqrt(3.6) * (es - ea)
"""
class Mather(ETBase):
"""
Developed by Mather 1978 and used in Rosenberry et al 2004. Calculates daily Pot ETP.
pet = 1.6 * (10*Ta/I)**a * (10/d), where a = 6.75e-7 * I**3 - 7.71e-5 * I**2 + 1.79e-2 * I + 0.49
I = annual heat index, sum((Ta/5)**1.514)
d = number of days in month
"""
class MattShuttleworth(ETBase):
"""
using formulation of Matt-Shuttleworth and Wallace, 2009. This is designed for semi-arid and windy areas as an
alternative to FAO-56 Reference Crop method
10.13031/2013.29217
https://elibrary.asabe.org/abstract.asp?aid=29217
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['CH', 'Roua', 'Ca', 'surf_res'])
ch = self.cons['CH'] # crop height
ro_a = self.cons['Roua']
ca = self.cons['Ca'] # specific heat of the air
# surface resistance (s m-1) of a well-watered crop equivalent to the FAO crop coefficient
r_s = self.cons['surf_res']
vabar = self.avp_from_rel_hum() # Vapour pressure
vas = self.mean_sat_vp_fao56()
r_n = self.net_rad(vabar) # net radiation
u2 = self._wind_2m() # Wind speed
delta = self.slope_sat_vp(self.input['temp'].values) # slope of vapour pressure curve
gam = self.psy_const() # psychrometric constant
tmp1 = self.seconds * ro_a * ca
# climatological resistance (s*m^-1) (S5.34)
r_clim = np.multiply(tmp1, np.divide(np.subtract(vas, vabar), np.multiply(delta, r_n)))
r_clim = np.where(r_clim == 0, 0.1, r_clim) # correction for r_clim = 0
u2 = np.where(u2 == 0, 0.1, u2) # correction for u2 = 0
# ratio of vapour pressure deficits at 50m to vapour pressure deficits at 2m heights, eq S5.35
a1 = (302 * (delta + gam) + 70 * gam * u2)
a2 = (208 * (delta + gam) + 70 * gam * u2)
a3 = 1/r_clim * ((302 * (delta + gam) + 70 * gam * u2) / (208 * (delta + gam) + 70 * gam * u2) * (208 / u2) - (302 / u2))
vpd50_to_vpd2 = a1/a2 + a3
# aerodynamic coefficient for crop height (s*m^-1) (eq S5.36 in McMohan et al 2013)
a1 = 1 / (0.41**2)
a2 = np.log((50 - 0.67 * ch) / (0.123 * ch))
a3 = np.log((50 - 0.67 * ch) / (0.0123 * ch))
a4 = np.log((2 - 0.08) / 0.0148) / np.log((50 - 0.08) / 0.0148)
rc_50 = a1 * a2 * a3 * a4
a1 = 1/LAMBDA
a2 = (delta * r_n + (ro_a * ca * u2 * (vas - vabar)) / rc_50 * vpd50_to_vpd2)
a3 = (delta + gam * (1 + r_s * u2 / rc_50))
et = a1 * a2/a3
self.post_process(et, kwargs.get('transform', False))
return et
class McGuinnessBordne(ETBase):
"""
calculates evapotranspiration [mm/day] using Mcguinnes Bordne formulation McGuinnes and Bordne, 1972.
"""
def __call__(self, *args, **kwargs):
ra = self._et_rad()
# latent heat of vaporisation, MJ/Kg
_lambda = LAMBDA # multiply((2.501 - 2.361e-3), self.input['temp'].values)
tmp1 = np.multiply((1/_lambda), ra)
tmp2 = np.divide(np.add(self.input['temp'].values, 5), 68)
et = np.multiply(tmp1, tmp2)
self.post_process(et, kwargs.get('transform', False))
return et
class Penman(ETBase):
"""
calculates pan evaporation from open water using formulation of Penman, 1948, as mentioned (as eq 12) in
McMahon et al., 2012. If wind data is missing then equation 33 from Valiantzas, 2006 is used which does not require
wind data.
uses: wind_f='pen48', a_s=0.23, b_s=0.5, albedo=0.23
uz
temp
rs
reh_hum
:param `wind_f` str, if 'pen48' is used then formulation of [1] is used otherwise formulation of [3] requires
wind_f to be 2.626.
"""
# todo, gives -ve values sometimes
def __call__(self, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'wind_f', 'albedo'],
ts=['temp', 'rh_mean'])
if self.cons['wind_f'] not in ['pen48', 'pen56']:
raise ValueError('value of given wind_f is not allowed.')
wind_method = 'macmohan'
if 'wind_method' in kwargs:
wind_method = kwargs['wind_method']
if self.cons['wind_f'] == 'pen48':
_a = 2.626
_b = 0.09
else:
_a = 1.313
_b = 0.06
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
rs = self.rs()
vabar = self.avp_from_rel_hum() # Vapour pressure *ea*
r_n = self.net_rad(vabar, rs) # net radiation
vas = self.mean_sat_vp_fao56()
if 'wind_speed' in self.input.columns:
if self.verbosity > 1:
print("Wind data have been used for calculating the Penman evaporation.")
u2 = self._wind_2m(method=wind_method)
fau = _a + 1.381 * u2
ea = np.multiply(fau, np.subtract(vas, vabar))
tmp1 = np.divide(delta, np.add(delta, gamma))
tmp2 = np.divide(r_n, LAMBDA)
tmp3 = np.multiply(np.divide(gamma, np.add(delta, gamma)), ea)
evap = np.add(np.multiply(tmp1, tmp2), tmp3)
# if wind data is not available
else:
if self.verbosity > 1:
print("Alternative calculation for Penman evaporation without wind data has been performed")
ra = self._et_rad()
tmp1 = np.multiply(np.multiply(0.047, rs), np.sqrt(np.add(self.input['temp'].values, 9.5)))
tmp2 = np.multiply(np.power(np.divide(rs, ra), 2.0), 2.4)
tmp3 = np.multiply(_b, np.add(self.input['temp'].values, 20))
tmp4 = np.subtract(1, np.divide(self.input['rh_mean'].values, 100))
tmp5 = np.multiply(tmp3, tmp4)
evap = np.add(np.subtract(tmp1, tmp2), tmp5)
self.post_process(evap, kwargs.get('transform', False))
return evap
class PenPan(ETBase):
"""
implementing the PenPan formulation for Class-A pan evaporation as given in Rotstayn et al., 2006
"""
def __call__(self, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'pen_ap', 'albedo', 'alphaA', 'pan_over_est',
'pan_est'],
ts=['temp', 'wind_speed'])
epan = self.evap_pan()
et = epan
if self.cons['pan_over_est']:
if self.cons['pan_est'] == 'pot_et':
et = np.multiply(np.divide(et, 1.078), self.cons['pan_coef'])
else:
et = np.divide(et, 1.078)
self.post_process(et, kwargs.get('transform', False))
return et
class PenmanMonteith(ETBase):
"""
calculates reference evapotranspiration according to Penman-Monteith (Allen et al 1998) equation which is
also recommended by FAO. The etp is calculated at the time step determined by the step size of input data.
For hourly or sub-hourly calculation, equation 53 is used while for daily time step equation 6 is used.
# Requirements
Following timeseries data is used
relative humidity
temperature
Following constants are used
lm=None, a_s=0.25, b_s=0.5, albedo=0.23
http://www.fao.org/3/X0490E/x0490e08.htm#chapter%204%20%20%20determination%20of%20eto
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'albedo', 'a_s', 'b_s'],
ts=['temp', 'wind_speed', 'jday'])
wind_2m = self._wind_2m()
d = self.slope_sat_vp(self.input['temp'].values)
g = self.psy_const()
# Mean saturation vapour pressure
if 'es' not in self.input:
if self.freq_in_mins == 1440:
es = self.mean_sat_vp_fao56()
elif self.freq_in_mins == 60:
es = self.sat_vp_fao56(self.input['temp'].values)
elif self.freq_in_mins < 60: # TODO should sub-hourly be same as hourly?
es = self.sat_vp_fao56(self.input['temp'].values)
else:
raise NotImplementedError
else:
es = self.input['es']
# actual vapour pressure
ea = self.avp_from_rel_hum()
if 'vp_def' not in self.input:
vp_d = es - ea # vapor pressure deficit
else:
vp_d = self.input['vp_def']
rn = self.net_rad(ea) # eq 40 in Fao
_g = self.soil_heat_flux(rn)
t1 = 0.408 * (d*(rn - _g))
nechay = d + g*(1 + 0.34 * wind_2m)
if self.freq_in_mins == 1440:
t5 = t1 / nechay
t6 = 900/(self.input['temp']+273) * wind_2m * vp_d * g / nechay
pet = np.add(t5, t6)
elif self.freq_in_mins < 1440: # TODO should sub-hourly be same as hourly?
t3 = np.multiply(np.divide(37, self.input['temp']+273.0), g)
t4 = np.multiply(t3, np.multiply(wind_2m, vp_d))
upar = t1 + t4
pet = upar / nechay
else:
raise NotImplementedError("For frequency of {} minutes, {} method can not be implemented"
.format(self.freq_in_mins, self.name))
self.post_process(pet, kwargs.get('transform', False))
return pet
class PriestleyTaylor(ETBase):
"""
following formulation of Priestley & Taylor, 1972.
    uses: a_s=0.23, b_s=0.5, alpha_pt=1.26, albedo=0.23
:param `alpha_pt` Priestley-Taylor coefficient = 1.26 for Priestley-Taylor model (Priestley and Taylor, 1972)
https://doi.org/10.1175/1520-0493(1972)100<0081:OTAOSH>2.3.CO;2
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'alpha_pt', 'albedo'])
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
vabar = self.avp_from_rel_hum() # *ea*
r_n = self.net_rad(vabar) # net radiation
# vas = self.mean_sat_vp_fao56()
tmp1 = np.divide(delta, np.add(delta, gamma))
tmp2 = np.multiply(tmp1, np.divide(r_n, LAMBDA))
tmp3 = np.subtract(tmp2, np.divide(self.soil_heat_flux(), LAMBDA))
et = np.multiply(self.cons['alpha_pt'], tmp3)
self.post_process(et, kwargs.get('transform', False))
return et
class Romanenko(ETBase):
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'albedo'],
ts=['temp'])
"""
using formulation of Romanenko
uses:
temp
rel_hum
There are two variants of it in Song et al 2017.
https://www.scirp.org/(S(czeh2tfqyw2orz553k1w0r45))/reference/ReferencesPapers.aspx?ReferenceID=2151471
"""
t = self.input['temp'].values
vas = self.mean_sat_vp_fao56()
vabar = self.avp_from_rel_hum() # Vapour pressure *ea*
tmp1 = np.power(np.add(1, np.divide(t, 25)), 2)
tmp2 = np.subtract(1, np.divide(vabar, vas))
et = np.multiply(np.multiply(4.5, tmp1), tmp2)
self.post_process(et, kwargs.get('transform', False))
return et
class SzilagyiJozsa(ETBase):
"""
    using formulation of Szilagyi, 2007.
https://doi.org/10.1029/2006GL028708
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['wind_f', 'alphaPT'])
if self.cons['wind_f'] == 'pen48':
_a = 2.626
_b = 0.09
else:
_a = 1.313
_b = 0.06
alpha_pt = self.cons['alphaPT'] # Priestley Taylor constant
delta = self.slope_sat_vp(self.input['temp'].values)
gamma = self.psy_const()
rs = self.rs()
vabar = self.avp_from_rel_hum() # Vapour pressure *ea*
r_n = self.net_rad(vabar) # net radiation
vas = self.mean_sat_vp_fao56()
if 'uz' in self.input.columns:
if self.verbosity > 1:
print("Wind data have been used for calculating the Penman evaporation.")
u2 = self._wind_2m()
fau = _a + 1.381 * u2
ea = np.multiply(fau, np.subtract(vas, vabar))
tmp1 = np.divide(delta, np.add(delta, gamma))
tmp2 = np.divide(r_n, LAMBDA)
tmp3 = np.multiply(np.divide(gamma, np.add(delta, gamma)), ea)
et_penman = np.add(np.multiply(tmp1, tmp2), tmp3)
# if wind data is not available
else:
if self.verbosity > 1:
print("Alternative calculation for Penman evaporation without wind data have been performed")
ra = self._et_rad()
tmp1 = np.multiply(np.multiply(0.047, rs), np.sqrt(np.add(self.input['temp'].values, 9.5)))
tmp2 = np.multiply(np.power(np.divide(rs, ra), 2.0), 2.4)
tmp3 = np.multiply(_b, np.add(self.input['temp'].values, 20))
tmp4 = np.subtract(1, np.divide(self.input['rh_mean'].values, 100))
tmp5 = np.multiply(tmp3, tmp4)
et_penman = np.add(np.subtract(tmp1, tmp2), tmp5)
# find equilibrium temperature T_e
t_e = self.equil_temp(et_penman)
delta_te = self.slope_sat_vp(t_e) # slope of vapour pressure curve at T_e
# Priestley-Taylor evapotranspiration at T_e
et_pt_te = np.multiply(alpha_pt, np.multiply(np.divide(delta_te, np.add(delta_te, gamma)), np.divide(r_n, LAMBDA)))
et = np.subtract(np.multiply(2, et_pt_te), et_penman)
self.post_process(et, kwargs.get('transform', False))
return et
class Thornthwait(ETBase):
"""calculates reference evapotrnaspiration according to empirical temperature based Thornthwaite
(Thornthwaite 1948) method. The method actualy calculates both ETP and evaporation. It requires only temperature
and day length as input. Suitable for monthly values.
"""
def __call__(self, *args, **kwargs):
if 'daylight_hrs' not in self.input.columns:
day_hrs = self.daylight_fao56()
else:
day_hrs = self.input['daylight_hrs']
self.input['adj_t'] = np.where(self.input['temp'].values < 0.0, 0.0, self.input['temp'].values)
I = self.input['adj_t'].resample('A').apply(custom_resampler) # heat index (I)
a = (6.75e-07 * I ** 3) - (7.71e-05 * I ** 2) + (1.792e-02 * I) + 0.49239
self.input['a'] = a
a_mon = self.input['a'] # monthly values filled with NaN
a_mon = pd.DataFrame(a_mon)
a_ann = pd.DataFrame(a)
a_monthly = a_mon.merge(a_ann, left_index=True, right_index=True, how='left').fillna(method='bfill')
self.input['I'] = I
i_mon = self.input['I'] # monthly values filled with NaN
i_mon = pd.DataFrame(i_mon)
i_ann = pd.DataFrame(I)
i_monthly = i_mon.merge(i_ann, left_index=True, right_index=True, how='left').fillna(method='bfill')
tmp1 = np.multiply(1.6, np.divide(day_hrs, 12.0))
tmp2 = np.divide(self.input.index.daysinmonth, 30.0)
tmp3 = np.multiply(np.power(np.multiply(10.0, np.divide(self.input['temp'].values, i_monthly['I'].values)),
a_monthly['a'].values), 10.0)
pet = np.multiply(tmp1, np.multiply(tmp2, tmp3))
# self.input['Thornthwait_daily'] = np.divide(self.input['Thornthwait_Monthly'].values, self.input.index.days_in_month)
self.post_process(pet, kwargs.get('transform', False))
return pet
class MortonCRAE(ETBase):
"""
for monthly pot. ET and wet-environment areal ET and actual ET by Morton 1983.
:return:
"""
class Papadakis(ETBase):
"""
    Calculates monthly values based on saturation vapor pressure and temperature. The basic equation is
eto = 0.5625 * (ea_tmax - ed)
ea: water pressure corresponding to avg max temperature [KiloPascal].
ed: saturation water pressure corresponding to the dew point temperature [KiloPascal].
Rosenberry et al., 2004 presented following equation quoting McGuinnes and Bordne, 1972
pet = 0.5625 * [es_max - (es_min - 2)] (10/d)
d = number of days in month
es = saturated vapour pressure at temperature of air in millibars
"""
class Ritchie(ETBase):
"""
Given by Jones and Ritchie 1990 and quoted by Valipour, 2005 and Pandey et al., 2016
et = rs * alpha [0.002322 * tmax + 0.001548*tmin + 0.11223]
"""
def __call__(self, *args, **kwargs):
        self.requirements(constants=['ritchie_a', 'ritchie_b', 'ritchie_c', 'ritchie_alpha'],
                          ts=['tmin', 'tmax'])
ritchie_a = self.cons['ritchie_a']
ritchie_b = self.cons['ritchie_b']
ritchie_c = self.cons['ritchie_c']
alpha = self.cons['ritchie_alpha']
rs = self.rs()
        eto = rs * alpha * (ritchie_a * self.input['tmax'] + ritchie_b * self.input['tmin'] + ritchie_c)
self.post_process(eto, kwargs.get('transform', False))
return eto
class Turc(ETBase):
"""
The original formulation is from Turc, 1961 which was developed for southern France and Africa.
Pandey et al 2016 mentioned a modified version of Turc quoting Xu et al., 2008, Singh, 2008 and Chen and Chen, 2008.
eto = alpha_t * 0.013 T/(T+15) ( (23.8856Rs + 50)/gamma)
A shorter version of this formula is quoted by Valipour, 2015 quoting Xu et al., 2008
eto = (0.3107 * Rs + 0.65) [T alpha_t / (T + 15)]
Here it is implemented as given (as eq 5) in Alexandris, et al., 2008 which is;
for rh > 50 %:
eto = 0.0133 * [T_mean / (T_mean + 15)] ( Rs + 50)
for rh < 50 %:
eto = 0.0133 * [T_mean / (T_mean + 15)] ( Rs + 50) [1 + (50 - Rh) / 70]
uses
:param `k` float or array like, monthly crop coefficient. A single value means same crop coefficient for
whole year
:param `a_s` fraction of extraterrestrial radiation reaching earth on sunless days
    :param `b_s` difference between fraction of extraterrestrial radiation reaching full-sun days
and that on sunless days.
Turc, L. (1961). Estimation of irrigation water requirements, potential evapotranspiration: a simple climatic
formula evolved up to date. Ann. Agron, 12(1), 13-49.
"""
def __call__(self, *args, **kwargs):
self.requirements(constants=['lat_dec_deg', 'altitude', 'turc_k'],
ts=['temp'])
use_rh = False # because while testing daily, rhmin and rhmax are given and rhmean is calculated by default
if 'use_rh' in kwargs:
use_rh = kwargs['use_rh']
rs = self.rs()
ta = self.input['temp'].values
et = np.multiply(np.multiply(self.cons['turc_k'], (np.add(np.multiply(23.88, rs), 50))),
np.divide(ta, (np.add(ta, 15))))
if use_rh:
if 'rh_mean' in self.input.columns:
rh_mean = self.input['rh_mean'].values
eq1 = np.multiply(np.multiply(np.multiply(self.cons['turc_k'], (np.add(np.multiply(23.88, rs), 50))),
np.divide(ta, (np.add(ta, 15)))),
(np.add(1, np.divide((np.subtract(50, rh_mean)), 70))))
eq2 = np.multiply(np.multiply(self.cons['turc_k'], (np.add(np.multiply(23.88, rs), 50))),
np.divide(ta, (np.add(ta, 15))))
et = np.where(rh_mean < 50, eq1, eq2)
self.post_process(et, kwargs.get('transform', False))
return et
class Valiantzas(ETBase):
"""
    Djaman 2016 mentioned 2 of his methods, while Valipour 2015 tested 5 variants of his formulations in Iran.
    Ahmad et al 2019 used 6 variants of this method, whereas Djaman et al., 2017 used 9 of its variants.
These 9 methods are given below:
method_1:
This is equation equation 19 in Valiantzas, 2012. This also does not require wind data.
eto = 0.0393 * Rs* sqrt(T_avg + 9.5) - (0.19 * Rs**0.6 * lat_rad**0.15)
            + 0.0061(T_avg + 20)(1.12*T_avg - T_min - 2)**0.7
method_2:
This is equation 14 in Valiantzas, 2012. This does not require wind data. The recommended value of alpha is 0.23.
eto = 0.0393 * Rs * sqrt(T_avg + 9.5) - (0.19 * Rs**0.6 * lat_rad**0.15) + 0.078(T_avg + 20)(1 - rh/100)
method_3
eto = 0.0393 * Rs * sqrt(T_avg + 9.5) - (Rs/Ra)**2 - [(T_avg + 20) * (1-rh/100) * ( 0.024 - 0.1 * Waero)]
method_4:
        This is equation 35 in Valiantzas 2013c paper and was referred to as the Fo-PENM method, using alpha as 0.25.
eto = 0.051 * (1 - alpha) * Rs * sqrt(T_avg + 9.5) - 2.4 * (Rs/Ra)**2
+ [0.048 * (T_avg + 20) * ( 1- rh/100) * (0.5 + 0.536 * u2)] + (0.00012 * z)
method_5:
This is equation 30 in Valiantzas 2013c. This is when no wind data is available.
eto = 0.0393 * Rs sqrt(T_avg + 9.5)
- [2.46 * Rs * lat**0.15 / (4 * sin(2 * pi * J / 365 - 1.39) lat + 12)**2 + 0.92]**2
- 0.024 * (T_avg + 20)(1 - rh/100) - (0.0268 * Rs)
+ (0.0984 * (T_avg + 17)) * (1.03 + 0.00055) * (T_max - T_min)**2 - rh/100
method_6
This method is when wind speed and solar radiation data is not available. This is equation 34 in
Valiantzas, 2013c.
eto = 0.0068 * Ra * sqrt[(T_avg + 9.5) * (T_max - T_min)]
- 0.0696 * (T_max - T_min) - 0.024 * (T_avg + 20)
* [ ((1-rh/100) - 0.00455 * Ra * sqrt(T_max - T_dew)
+ 0.0984 * (T_avg + 17) * (1.03 + 0.0055) * (T_max - T_min)**2)
- rh/100
method_7:
This is equation 27 in Valiantzas, 2013c. This method requires all data. Djaman et al., (by mistake) used 0.0043
in denominator instead of 0.00043.
eto = 0.051 * (1-alpha) * Rs * (T_avg + 9.5)**0.5
- 0.188 * (T_avg + 13) * (Rs/Ra - 0.194)
* (1 - 0.00015) * (T_avg + 45)**2 * sqrt(rh/100)
- 0.0165 * Rs * u**0.7 + 0.0585 * (T_avg + 17) * u**0.75
* {[1 + 0.00043 * (T_max - T_min)**2]**2 - rh/100} / [1 + 0.00043 * (T_max - T_min)**2 + 0.0001*z]
method_8:
eto = 0.051 * (1-alpha) * Rs * (T_avg + 9.5)**0.5
- 2.4 (Rs/Ra)**2 - 2.4 * (T_avg + 20) * (1 - rh/100)
- 0.0165 * Rs * u**0.7 + 0.0585 * (T_avg + 17) * u**0.75
* { [1 + 0.00043 (T_max - T_min)**2]**2 - rh/100} / ( 1 + 0.00043 * (T_max - T_min)**2 + (0.0001 * z)
method_9:
This must be equation 29 of Valiantzas, 2013c but method 9 in Djaman et al., 2017 used 2.46 instead of 22.46. This
formulation does not require Ra.
eto = [0.051 * (1-alpha) * Rs (T_avg + 9.5)**2
* (2.46 * Rs * lat**0.15 / (4 sin(2 * pi J / 365 - 1.39) * lat + 12)**2 + 0.92)]**2
- 0.024 * (T_avg + 20) * (1-rh/100) - 0.0165 * Rs * u**0.7
+ 0.0585 * (T_avg + 17) * u**0.75 * {[(1.03 + 0.00055) * (T_max - T_min)**2 - rh/100] + 0.0001*z}
"""
def __call__(self, method='method_1', **kwargs):
self.requirements(constants=['valiantzas_alpha'],
ts=['temp'])
alpha = self.cons['valiantzas_alpha']
        z = self.cons['altitude']
rh = self.input['rh']
ta = self.input['temp']
tmin = self.input['tmin']
tmax = self.input['tmax']
j = self.input['jday']
ra = self._et_rad()
u2 = self._wind_2m()
w_aero = np.where(rh <= 65.0, 1.067, 0.78) # empirical weighted factor
rs_ra = (self.rs() / ra)**2
tr = tmax - tmin
tr_sq = tr**2
lat_15 = self.lat_rad**0.15
t_sqrt = np.sqrt(ta + 9.5)
init = 0.0393 * self.rs() * t_sqrt
rs_fact = 0.19 * (self.rs()**0.6) * lat_15
t_20 = ta + 20
rh_factor = 1.0 - (rh/100.0)
if method == 'method_1':
            eto = init - rs_fact + 0.0061 * t_20 * (1.12 * ta - tmin - 2.0)**0.7
elif method == 'method_2':
eto = init - rs_fact + (0.078 * t_20 * rh_factor)
elif method == 'method_3':
eto = init - rs_ra - (t_20 * rh_factor * (0.024 - 0.1 * w_aero))
elif method == 'method_4':
eto = 0.051 * (1 - alpha) * self.rs() * t_sqrt - 2.4 * rs_ra + (0.048 * t_20 * rh_factor * (0.5 + 0.536 * u2)) + (0.00012 * z)
elif method == 'method_5':
eto = init
        elif method in ('method_6', 'method_7', 'method_8'):
            # not implemented yet; raising here avoids an UnboundLocalError on `eto` below
            raise NotImplementedError(f"{method} of Valiantzas is not yet implemented")
elif method == 'method_9':
eto = 0.051 * (1 - alpha) * self.rs() * (ta + 9.5)**2 * (2.46 * self.rs() * lat_15) / (4 * np.sin(2 * 3.14 * j / 365 - 1.39))
else:
raise ValueError
self.post_process(eto, kwargs.get('transform', False))
return eto
class Oudin(ETBase):
"""
https://doi.org/10.1016/j.jhydrol.2004.08.026
"""
pass
class RengerWessolek(ETBase):
"""
RENGER, M. & WESSOLEK, G. (1990): Auswirkungen von Grundwasserabsenkung und Nutzungsänderungen auf die
Grundwasserneubildung. – Mit. Inst. für Wasserwesen, Univ. der Bundeswehr München, 386: 295-307.
"""
class Black(ETBase):
"""
https://doi.org/10.2136/sssaj1969.03615995003300050013x
"""
class McNaughtonBlack(ETBase):
"""
https://doi.org/10.1029/WR009i006p01579
"""
def custom_resampler(array_like):
"""calculating heat index using monthly values of temperature."""
return np.sum(np.power(np.divide(array_like, 5.0), 1.514))
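# Illustrative sketch (hypothetical values, not part of the original module): turning twelve
# monthly mean temperatures into the Thornthwaite heat index I used above.
# >>> monthly_temp = np.array([2.0, 4.0, 8.0, 13.0, 18.0, 22.0, 24.0, 23.0, 19.0, 13.0, 7.0, 3.0])
# >>> heat_index = custom_resampler(monthly_temp)   # sum((T/5) ** 1.514)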
def assign_yearly(data, index):
    # TODO for leap years or when the first or final year is not complete, the results are not correct; imitate
    # https://github.com/cran/Evapotranspiration/blob/master/R/Evapotranspiration.R#L1848
""" assigns `step` summed data to whole data while keeping the length of data preserved."""
n_ts = pd.DataFrame(data, index=index, columns=['N'])
a = n_ts.resample('A').sum() # annual sum
ad = a.resample('D').backfill() # annual sum backfilled
# for case
if len(ad) < 2:
ad1 = pd.DataFrame(np.full(data.shape, np.nan), pd.date_range(n_ts.index[0], periods=len(data), freq='D'),
columns=['N'])
ad1.loc[ad1.index[-1]] = ad.values
ad2 = ad1.bfill()
return ad2
else:
idx = pd.date_range(n_ts.index[0], ad.index[-1], freq="D")
n_df_ful = pd.DataFrame(np.full(idx.shape, np.nan), index=idx, columns=['N'])
n_df_ful['N'][ad.index] = ad.values.reshape(-1, )
n_df_obj = n_df_ful[n_ts.index[0]: n_ts.index[-1]]
n_df_obj1 = n_df_obj.bfill()
return n_df_obj1 | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/et/et_methods.py | et_methods.py |
from typing import Union, List
from .utils import _make_output_layer
def TabTransformer(
num_numeric_features: int,
cat_vocabulary: dict,
hidden_units=32,
num_heads: int = 4,
depth: int = 4,
dropout: float = 0.1,
num_dense_lyrs: int = 1,
prenorm_mlp:bool = True,
post_norm: bool = True,
final_mlp_units: Union[int, List[int]] = 16,
num_outputs: int = 1,
mode: str = "regression",
output_activation: str = None,
seed:int = 313,
backend:str = "tf"
)->dict:
"""
    Tab Transformer following the work of `Huang et al., 2021 <https://arxiv.org/abs/2012.06678>`_
Parameters
----------
num_numeric_features : int
number of numeric features to be used as input.
cat_vocabulary : dict
a dictionary whose keys are names of categorical features and values
are lists which consist of unique values of categorical features.
        You can use the function :func:`ai4water.models.utils.gen_cat_vocab` to create this for your
own data. The length of dictionary should be equal to number of
categorical features.
hidden_units : int, optional (default=32)
number of hidden units
num_heads : int, optional (default=4)
number of attention heads
depth : int (default=4)
number of transformer blocks to be stacked on top of each other
    dropout : float, optional (default=0.1)
        dropout rate in transformer
post_norm : bool (default=True)
prenorm_mlp : bool (default=True)
    num_dense_lyrs : int (default=1)
number of dense layers in MLP block
final_mlp_units : int (default=16)
number of units/neurons in final MLP layer i.e. the MLP layer
after Transformer block
num_outputs : int, optional (default=1)
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
mode : str, optional (default="regression")
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
seed : int
seed for reproducibility
backend : str
either ``tf`` or ``pytorch``
Returns
-------
dict :
a dictionary with ``layers`` as key
Examples
----------
>>> from ai4water import Model
>>> from ai4water.models import TabTransformer
>>> from ai4water.utils.utils import TrainTestSplit
>>> from ai4water.models.utils import gen_cat_vocab
>>> from ai4water.datasets import mg_photodegradation
...
... # bring the data as DataFrame
>>> data, _, _ = mg_photodegradation()
... # Define categorical and numerical features and label
>>> NUMERIC_FEATURES = data.columns.tolist()[0:9]
>>> CAT_FEATURES = ["Catalyst_type", "Anions"]
>>> LABEL = "Efficiency (%)"
... # create vocabulary of unique values of categorical features
>>> cat_vocab = gen_cat_vocab(data)
... # make sure the data types are correct
>>> data[NUMERIC_FEATURES] = data[NUMERIC_FEATURES].astype(float)
>>> data[CAT_FEATURES] = data[CAT_FEATURES].astype(str)
>>> data[LABEL] = data[LABEL].astype(float)
>>> # split the data into training and test set
>>> splitter = TrainTestSplit(seed=313)
>>> train_data, test_data, _, _ = splitter.split_by_random(data)
...
... # build the model
>>> model = Model(model=TabTransformer(
... num_numeric_features=len(NUMERIC_FEATURES), cat_vocabulary=cat_vocab,
... hidden_units=16, final_mlp_units=[84, 42]))
... # make a list of input arrays for training data
>>> train_x = [train_data[NUMERIC_FEATURES].values, train_data[CAT_FEATURES]]
>>> test_x = [test_data[NUMERIC_FEATURES].values, test_data[CAT_FEATURES].values]
...
>>> h = model.fit(x=train_x, y= train_data[LABEL].values,
... validation_data=(test_x, test_data[LABEL].values), epochs=1)
"""
kws = dict(
cat_vocabulary=cat_vocabulary,
num_numeric_features=num_numeric_features,
hidden_units=hidden_units,
num_heads = num_heads,
depth = depth,
dropout = dropout,
num_dense_lyrs = num_dense_lyrs,
prenorm_mlp = prenorm_mlp,
post_norm = post_norm,
final_mlp_units = final_mlp_units,
num_outputs = num_outputs,
mode = mode,
output_activation = output_activation,
seed = seed)
if backend=="tf":
from ._tensorflow import TabTransformer
return TabTransformer(**kws)
else:
raise NotImplementedError
def FTTransformer(
num_numeric_features:int,
cat_vocabulary:dict = None,
hidden_units = 32,
num_heads: int = 4,
depth:int = 4,
dropout: float = 0.1,
num_dense_lyrs:int = 2,
post_norm:bool = True,
final_mlp_units:int = 16,
num_outputs: int = 1,
mode: str = "regression",
output_activation: str = None,
seed: int = 313,
backend:str = "tf"
)->dict:
"""
FT Transformer following the work of `Gorishniy et al., 2021 <https://arxiv.org/pdf/2106.11959v2.pdf>`_
Parameters
----------
num_numeric_features : int
number of numeric features to be used as input.
cat_vocabulary : dict
a dictionary whose keys are names of categorical features and values
are lists which consist of unique values of categorical features.
        You can use the function :func:`ai4water.models.utils.gen_cat_vocab` to create this for your
own data. The length of dictionary should be equal to number of
categorical features. If it is None, then it is supposed that
no categoical variables are available and the model will expect only
numerical input features.
hidden_units : int, optional (default=32)
number of hidden units
num_heads : int, optional (default=4)
number of attention heads
depth : int (default=4)
number of transformer blocks to be stacked on top of each other
    dropout : float, optional (default=0.1)
        dropout rate in transformer
post_norm : bool (default=True)
num_dense_lyrs : int (default=2)
number of dense layers in MLP block
final_mlp_units : int (default=16)
number of units/neurons in final MLP layer i.e. the MLP layer
after Transformer block
num_outputs : int, optional (default=1)
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
mode : str, optional (default="regression")
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
seed : int
backend : str
either ``tf`` or ``pytorch``
Returns
-------
dict :
a dictionary with ``layers`` as key
Examples
----------
>>> from ai4water import Model
>>> from ai4water.models import FTTransformer
>>> from ai4water.datasets import mg_photodegradation
>>> from ai4water.models.utils import gen_cat_vocab
>>> from ai4water.utils.utils import TrainTestSplit
>>> # bring the data as DataFrame
>>> data, _, _ = mg_photodegradation()
... # Define categorical and numerical features and label
>>> NUMERIC_FEATURES = data.columns.tolist()[0:9]
>>> CAT_FEATURES = ["Catalyst_type", "Anions"]
>>> LABEL = "Efficiency (%)"
... # create vocabulary of unique values of categorical features
>>> cat_vocab = gen_cat_vocab(data)
... # make sure the data types are correct
>>> data[NUMERIC_FEATURES] = data[NUMERIC_FEATURES].astype(float)
>>> data[CAT_FEATURES] = data[CAT_FEATURES].astype(str)
>>> data[LABEL] = data[LABEL].astype(float)
... # split the data into training and test set
>>> splitter = TrainTestSplit(seed=313)
>>> train_data, test_data, _, _ = splitter.split_by_random(data)
... # build the model
>>> model = Model(model=FTTransformer(len(NUMERIC_FEATURES), cat_vocab))
... # make a list of input arrays for training data
>>> train_x = [train_data[NUMERIC_FEATURES].values,
... train_data[CAT_FEATURES].values]
...
>>> test_x = [test_data[NUMERIC_FEATURES].values,
... test_data[CAT_FEATURES].values]
... # train the model
>>> h = model.fit(x=train_x, y= train_data[LABEL].values,
... validation_data=(test_x, test_data[LABEL].values),
... epochs=1)
"""
kws = dict(
cat_vocabulary=cat_vocabulary,
num_numeric_features=num_numeric_features,
hidden_units=hidden_units,
num_heads = num_heads,
depth = depth,
dropout = dropout,
num_dense_lyrs = num_dense_lyrs,
post_norm = post_norm,
final_mlp_units = final_mlp_units,
num_outputs = num_outputs,
mode = mode,
output_activation = output_activation,
seed = seed)
if backend=="tf":
from ._tensorflow import FTTransformer
return FTTransformer(**kws)
else:
raise NotImplementedError
def MLP(
units: Union[int, list] = 32,
num_layers:int = 1,
input_shape: tuple = None,
num_outputs:int = 1,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
mode:str = "regression",
output_activation:str = None,
backend:str = "tf",
**kwargs
)->dict:
"""helper function to make multi layer perceptron model.
This model consists of stacking layers of Dense_ layers. The number of
dense layers are defined by ``num_layers``. Each layer can be optionaly
followed by a Dropout_ layer.
Parameters
----------
units : Union[int, list], default=32
number of units in Dense layer
num_layers : int, optional, (default, 1)
number of Dense_ or Linear_ layers to use as hidden layers, excluding output layer.
input_shape : tuple, optional (default=None)
shape of input tensor to the model. If specified, it should exclude batch_size
for example if model takes inputs (num_examples, num_features) then
we should define the shape as (num_features,). The batch_size dimension
is always None.
num_outputs : int, (default=1)
number of output features from the network
activation : Union[str, list], optional (default=None)
activation function to use.
dropout : Union[float, list], optional
dropout to use in Dense layer
mode : str, optional (default="regression")
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
backend : str (default='tf')
either ``tf`` or ``pytorch``
**kwargs :
any additional keyword arguments for Dense_ layer
Returns
-------
dict :
a dictionary with 'layers' as key which can be fed to ai4water's Model
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import MLP
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> input_features = data.columns.tolist()[0:-1]
>>> output_features = data.columns.tolist()[-1:]
... # build a basic MLP
>>> MLP(32)
... # MLP with 3 Dense layers
>>> MLP(32, 3)
... # we can specify input shape as 3d (first dimension is always None)
>>> MLP(32, 3, (5, 10))
... # we can also specify number of units for each layer
>>> MLP([32, 16, 8], 3, (13, 1))
... # we can feed any argument which is accepted by Dense layer
>>> mlp = MLP(32, 3, (13, ), use_bias=True, activation="relu")
... # we can feed the output of MLP to ai4water's Model
>>> model = Model(model=mlp, input_features=input_features,
>>> output_features=output_features)
>>> model.fit(data=data)
    Similarly, for pytorch as backend we can build the model as below
>>> model = Model(model=MLP(32, 2, (13,), backend="pytorch"),
... backend="pytorch",
... input_features = input_features,
... output_features = output_features)
>>> model.fit(data=data)
.. _Dense:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense
.. _Linear:
https://pytorch.org/docs/stable/generated/torch.nn.Linear.html
.. _Dropout:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dropout
"""
check_backend("MLP", backend=backend)
kws = dict(
units = units,
num_layers = num_layers,
input_shape = input_shape,
num_outputs = num_outputs,
activation = activation,
dropout = dropout,
mode = mode,
output_activation = output_activation,
**kwargs
)
if backend == "tf":
from ._tensorflow import MLP
return MLP(**kws)
else:
from ._torch import MLP
return MLP(**kws)
def LSTM(
units: Union[int, list] = 32,
num_layers:int = 1,
input_shape: tuple = None,
num_outputs:int = 1,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
mode:str = "regression",
output_activation:str = None,
backend:str = "tf",
**kwargs
):
"""helper function to make LSTM Model
Parameters
----------
units : Union[int, list], optional (default 32)
number of units in LSTM layer
num_layers : int (default=1)
number of lstm layers to use
input_shape : tuple, optional (default=None)
shape of input tensor to the model. If specified, it should exclude batch_size
for example if model takes inputs (num_examples, lookback, num_features) then
we should define the shape as (lookback, num_features). The batch_size dimension
is always None.
    num_outputs : int, optional (default=1)
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
activation : Union[str, list], optional
activation function to use in LSTM
dropout :
if > 0.0, a dropout layer is added after each LSTM layer
mode : str, optional
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
backend : str
        the type of backend to use. Allowed values are ``tf`` or ``pytorch``.
**kwargs :
any keyword argument for LSTM layer of `tensorflow <https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM>`_
or `pytorch <https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html>`_ layer
Returns
-------
dict :
a dictionary with 'layers' as key
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import LSTM
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> input_features = data.columns.tolist()[0:-1]
>>> output_features = data.columns.tolist()[-1:]
# a simple LSTM model with 32 neurons/units
>>> LSTM(32)
# to build a model with stacking of LSTM layers
>>> LSTM(32, num_layers=2)
# we can build ai4water's model and train it
>>> lstm = LSTM(32)
>>> model = Model(model=lstm, input_features=input_features,
>>> output_features=output_features, ts_args={"lookback": 5})
>>> model.fit(data=data)
    Similarly, for pytorch as backend the interface will be the same
except that we need to explicitly tell the backend as below
>>> model = Model(model=LSTM(32, 2, (5, 13), backend="pytorch"),
... backend="pytorch",
... input_features = input_features,
... output_features = output_features,
... ts_args={'lookback':5})
>>> model.fit(data=data)
"""
check_backend("LSTM", backend=backend)
kws = dict(
units = units,
num_layers = num_layers,
input_shape = input_shape,
num_outputs = num_outputs,
activation = activation,
dropout = dropout,
mode = mode,
output_activation = output_activation,
**kwargs
)
if backend == "tf":
from ._tensorflow import LSTM
return LSTM(**kws)
else:
from ._torch import LSTM
return LSTM(**kws)
def AttentionLSTM(
units: Union[int, list] = 32,
num_layers:int = 1,
input_shape: tuple = None,
num_outputs:int = 1,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
atten_units:int = 128,
atten_activation:str = "tanh",
mode:str = "regression",
output_activation:str = None,
backend:str = "tf",
**kwargs
):
"""helper function to make LSTM Model
Parameters
----------
units : Union[int, list], optional (default 32)
number of units in LSTM layer
num_layers : int (default=1)
number of lstm layers to use
input_shape : tuple, optional (default=None)
shape of input tensor to the model. If specified, it should exclude batch_size
for example if model takes inputs (num_examples, lookback, num_features) then
we should define the shape as (lookback, num_features). The batch_size dimension
is always None.
    num_outputs : int, optional (default=1)
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
activation : Union[str, list], optional
activation function to use in LSTM
dropout :
if > 0.0, a dropout layer is added after each LSTM layer
atten_units : int
number of units in SelfAttention layer
atten_activation : str
activation function in SelfAttention layer
mode : str, optional
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
backend : str
        the type of backend to use. Allowed values are ``tf`` or ``pytorch``.
**kwargs :
any keyword argument for LSTM layer of `tensorflow <https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM>`_
or `pytorch <https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html>`_ layer
Returns
-------
dict :
a dictionary with 'layers' as key
Examples
--------
>>> from ai4water import Model
>>> from ai4water.datasets import busan_beach
>>> from ai4water.models import AttentionLSTM
>>> data = busan_beach()
>>> input_features = data.columns.tolist()[0:-1]
>>> output_features = data.columns.tolist()[-1:]
# a simple Attention LSTM model with 32 neurons/units
>>> AttentionLSTM(32)
# to build a model with stacking of LSTM layers
>>> AttentionLSTM(32, num_layers=2)
# we can build ai4water's model and train it
>>> lstm = AttentionLSTM(32)
>>> model = Model(model=lstm, input_features=input_features,
>>> output_features=output_features, ts_args={"lookback": 5})
>>> model.fit(data=data)
"""
check_backend("AttentionLSTM", backend=backend)
kws = dict(
units = units,
num_layers = num_layers,
input_shape = input_shape,
num_outputs = num_outputs,
activation = activation,
dropout = dropout,
atten_units = atten_units,
atten_activation = atten_activation,
mode = mode,
output_activation = output_activation,
**kwargs
)
if backend == "tf":
from ._tensorflow import AttentionLSTM
return AttentionLSTM(**kws)
else:
raise NotImplementedError
def CNN(
filters: Union[int, list] = 32,
kernel_size: Union[int, tuple, list] = 3,
convolution_type: str = "1D",
num_layers: int = 1,
padding: Union[str, list] = "same",
strides: Union[int, list]= 1,
pooling_type: Union[str, list] = None,
pool_size: Union[int, list] = 2,
batch_normalization: Union[bool, list] = None,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
input_shape: tuple = None,
num_outputs:int = 1,
mode: str = "regression",
output_activation:str = None,
backend:str = "tf",
**kwargs
)->dict:
"""helper function to make convolution neural network based model.
Parameters
----------
filters : Union[int, list], optional
number of filters in convolution layer. If given as list, it should
be equal to ``num_layers``.
kernel_size : Union[int, list], optional
kernel size in (each) convolution layer
convolution_type : str, optional, (default="1D")
either ``1D`` or ``2D`` or ``3D``
num_layers : int, optional
number of convolution layers to use. Should be > 0.
padding : Union[str, list], optional
padding to use in (each) convolution layer
strides : Union[int, list], optional
strides to use in (each) convolution layer
pooling_type : str, optional
either "MaxPool" or "AveragePooling"
pool_size : Union[int, list], optional
only valid if pooling_type is not None
batch_normalization :
whether to use batch_normalization after each convolution or
convolution+pooling layer. If true, a batch_norm_ layer
is added.
activation : Union[str, list], optional
activation function to use in convolution layer
dropout : Union[float, list], optional
        if > 0.0, a dropout layer is added after each convolution layer
input_shape : tuple, optional (default=None)
shape of input tensor to the model. If specified, it should exclude batch_size
for example if model takes inputs (num_examples, lookback, num_features) then
we should define the shape as (lookback, num_features). The batch_size dimension
is always None.
num_outputs : int, optional, (default=1)
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
mode : str, optional
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
backend : str
either "tf" or "pytorch"
**kwargs :
any keyword argument for Convolution_ layer
Returns
-------
dict :
a dictionary with 'layers' as key
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import CNN
>>> from ai4water.datasets import busan_beach
...
>>> data = busan_beach()
>>> time_steps = 5
>>> input_features = data.columns.tolist()[0:-1]
>>> output_features = data.columns.tolist()[-1:]
>>> model_config = CNN(32, 2, "1D", input_shape=(time_steps, len(input_features)))
>>> model = Model(model=model_config, ts_args={"lookback": time_steps}, backend="pytorch",
... input_features=input_features, output_features=output_features)
...
>>> model.fit(data=data)
>>> model_config = CNN(32, 2, "1D", pooling_type="MaxPool", input_shape=(time_steps, len(input_features)))
>>> model = Model(model=model_config, ts_args={"lookback": time_steps}, backend="pytorch",
... input_features=input_features, output_features=output_features)
...
>>> model.fit(data=data)
.. _Convolution:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1D
.. _batch_norm:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization
"""
check_backend("CNN", backend=backend)
kws = dict(
filters=filters,
kernel_size=kernel_size,
convolution_type=convolution_type,
num_layers=num_layers,
padding=padding,
strides=strides,
pooling_type=pooling_type,
pool_size=pool_size,
batch_normalization=batch_normalization,
activation=activation,
dropout=dropout,
input_shape=input_shape,
num_outputs=num_outputs,
mode=mode,
output_activation=output_activation,
**kwargs
)
if backend == "tf":
from ._tensorflow import CNN
return CNN(**kws)
else:
for arg in ['strides', 'kernel_size']:
kws.pop(arg)
from ._torch import CNN
return CNN(**kws)
def CNNLSTM(
input_shape:tuple,
sub_sequences=3,
cnn_layers:int = 2,
lstm_layers:int = 1,
filters:Union[int, list]=32,
kernel_size: Union[int, tuple, list]=3,
max_pool:bool=False,
units: Union[int, tuple, list] = 32,
num_outputs:int = 1,
mode:str = "regression",
output_activation:str = None,
backend:str = "tf",
)->dict:
"""
helper function to make CNNLSTM model. It adds one or more 1D convolutional
layers before one or more LSTM layers.
Parameters
----------
input_shape : tuple
shape of input tensor to the model. If specified, it should exclude batch_size
for example if model takes inputs (num_examples, lookback, num_features) then
we should define the shape as (lookback, num_features). The batch_size dimension
is always None.
sub_sequences : int
number of sub_sequences in which to divide the input before applying
Conv1D on it.
cnn_layers : int , optional (default=2)
number of cnn layers
lstm_layers :
number of lstm layers
filters : Union[int, list], optional
number of filters in (each) cnn layer
kernel_size : Union[int, tuple, list], optional
kernel size in (each) cnn layer
    max_pool : bool, optional (default=False)
whether to use max_pool after every cnn layer or not
units : Union[int, list], optional (default=32)
number of units in (each) lstm layer
num_outputs : int, optional (default=1)
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
mode : str, optional (default="regression")
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
backend : str
Returns
-------
dict :
a dictionary with ``layers`` as key
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import CNNLSTM
>>> from ai4water.datasets import busan_beach
... # define data and input/output features
>>> data = busan_beach()
>>> inputs = data.columns.tolist()[0:-1]
>>> outputs = [data.columns.tolist()[-1]]
>>> lookback_steps = 9
... # get configuration of CNNLSTM as dictionary which can be given to Model
>>> model_config = CNNLSTM(input_shape=(lookback_steps, len(inputs)), sub_sequences=3)
... # build the model
>>> model = Model(model=model_config, input_features=inputs,
... output_features=outputs, ts_args={"lookback": lookback_steps})
... # train the model
>>> model.fit(data=data)
"""
check_backend("CNNLSTM", backend=backend)
kws = dict(
input_shape=input_shape,
sub_sequences=sub_sequences,
cnn_layers=cnn_layers,
lstm_layers=lstm_layers,
filters=filters,
kernel_size=kernel_size,
max_pool=max_pool,
units=units,
num_outputs=num_outputs,
mode=mode,
output_activation=output_activation
)
if backend == "tf":
from ._tensorflow import CNNLSTM
return CNNLSTM(**kws)
else:
raise NotImplementedError
def LSTMAutoEncoder(
input_shape:tuple,
encoder_layers:int = 1,
decoder_layers:int = 1,
encoder_units: Union[int, list]=32,
decoder_units: Union[int, list]=32,
num_outputs: int = 1,
prediction_mode: bool = True,
mode:str = "regression",
output_activation: str = None,
backend:str = "tf",
**kwargs
)->dict:
"""
helper function to make LSTM based AutoEncoder model.
Parameters
----------
input_shape : tuple
shape of input tensor to the model. This shape should exclude batch_size
for example if model takes inputs (num_examples, num_features) then
we should define the shape as (num_features,). The batch_size dimension
is always None.
encoder_layers : int, optional (default=1)
number of encoder LSTM layers
decoder_layers : int, optional (default=1)
number of decoder LSTM layers
encoder_units : Union[int, list], optional, (default=32)
number of units in (each) encoder LSTM
decoder_units : Union[int, list], optional, (default=32)
number of units in (each) decoder LSTM
    prediction_mode : bool, optional (default=True)
        whether to use the auto-encoder for prediction or for reconstruction
num_outputs : int, optional
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
mode : str, optional (default="regression")
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
backend : str
**kwargs
Returns
-------
dict :
a dictionary with ``layers`` as key
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import LSTMAutoEncoder
>>> from ai4water.datasets import busan_beach
... # define data and input/output features
>>> data = busan_beach()
>>> inputs = data.columns.tolist()[0:-1]
>>> outputs = [data.columns.tolist()[-1]]
>>> lookback_steps = 9
... # get configuration of CNNLSTM as dictionary which can be given to Model
>>> model_config = LSTMAutoEncoder((lookback_steps, len(inputs)), 2, 2, 32, 32)
... # build the model
>>> model = Model(model=model_config, input_features=inputs,
... output_features=outputs, ts_args={"lookback": lookback_steps})
... # train the model
>>> model.fit(data=data)
specify neurons in each of encoder and decoder LSTMs
>>> model_config = LSTMAutoEncoder((lookback_steps, len(inputs)), 2, 2, [64, 32], [32, 64])
... # build the model
>>> model = Model(model=model_config, input_features=inputs,
... output_features=outputs, ts_args={"lookback": lookback_steps})
... # train the model
>>> model.fit(data=data)
"""
check_backend("LSTMAutoEncoder", backend=backend)
kws = dict(
input_shape=input_shape,
encoder_layers=encoder_layers,
decoder_layers=decoder_layers,
encoder_units=encoder_units,
decoder_units=decoder_units,
num_outputs=num_outputs,
prediction_mode=prediction_mode,
mode=mode,
output_activation=output_activation,
**kwargs
)
if backend == "tf":
from ._tensorflow import LSTMAutoEncoder
return LSTMAutoEncoder(**kws)
else:
raise NotImplementedError
def TCN(
input_shape,
filters:int = 32,
kernel_size: int = 2,
nb_stacks: int = 1,
dilations = [1, 2, 4, 8, 16, 32],
num_outputs:int = 1,
mode="regression",
output_activation: str = None,
backend:str = "tf",
**kwargs
)->dict:
"""helper function for building temporal convolution network
Parameters
----------
input_shape : tuple
shape of input tensor to the model. This shape should exclude batch_size
for example if model takes inputs (num_examples, num_features) then
we should define the shape as (num_features,). The batch_size dimension
is always None.
filters : int, optional (default=32)
number of filters
kernel_size : int, optional (default=2)
kernel size
    nb_stacks : int, optional (default=1)
number of stacks of tcn layer
dilations :
dilation rate
num_outputs : int, optional
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
mode : str, optional (default="regression")
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
backend : str
**kwargs
any additional keyword argument
Returns
-------
dict :
a dictionary with ``layers`` as key
Examples
--------
>>> from ai4water import Model
>>> from ai4water.models import TCN
>>> from ai4water.datasets import busan_beach
... # define data and input/output features
>>> data = busan_beach()
>>> inputs = data.columns.tolist()[0:-1]
>>> outputs = [data.columns.tolist()[-1]]
>>> lookback_steps = 9
... # get configuration of CNNLSTM as dictionary which can be given to Model
>>> model_config = TCN((lookback_steps, len(inputs)), 32)
... # build the model
>>> model = Model(model=model_config, input_features=inputs,
... output_features=outputs, ts_args={"lookback": lookback_steps})
... # train the model
>>> model.fit(data=data)
"""
check_backend("TCN", backend=backend)
kws = dict(
input_shape=input_shape,
filters=filters,
kernel_size=kernel_size,
nb_stacks=nb_stacks,
dilations=dilations,
num_outputs=num_outputs,
mode=mode,
output_activation=output_activation,
**kwargs
)
if backend == "tf":
from ._tensorflow import TCN
return TCN(**kws)
else:
raise NotImplementedError
def TFT(
input_shape,
hidden_units: int = 32,
num_heads: int = 3,
dropout:float = 0.1,
num_outputs:int = 1,
use_cudnn:bool = False,
mode:str="regression",
output_activation:str = None,
backend:str = "tf",
)->dict:
"""helper function for temporal fusion transformer based model
Parameters
----------
input_shape : tuple
shape of input tensor to the model. This shape should exclude batch_size
for example if model takes inputs (num_examples, num_features) then
we should define the shape as (num_features,). The batch_size dimension
is always None.
hidden_units : int, optional (default=32)
number of hidden units
    num_heads : int, optional (default=3)
number of attention heads
    dropout : float, optional (default=0.1)
        dropout rate
num_outputs : int, optional (default=1)
number of output features. If ``mode`` is ``classification``, this refers
to number of classes.
use_cudnn : bool, optional (default=False)
whether to use cuda or not
mode : str, optional (default="regression")
either ``regression`` or ``classification``
output_activation : str, optional (default=None)
        activation of the output layer. If not given and the mode is classification
then the activation of output layer is decided based upon ``num_outputs``
argument. In such a case, for binary classification, sigmoid with 1 output
neuron is preferred. Therefore, even if the num_outputs are 2,
the last layer will have 1 neuron and activation function is ``sigmoid``.
Although the user can set ``softmax`` for 2 num_outputs as well
(binary classification) but this seems superfluous and is slightly
more expensive.
For multiclass, the last layer will have neurons equal to num_outputs
and ``softmax`` as activation.
backend : str
Returns
-------
dict :
a dictionary with ``layers`` as key
Examples
--------
>>> from ai4water.functional import Model
>>> from ai4water.models import TFT
>>> from ai4water.datasets import busan_beach
>>> model = Model(model=TFT(input_shape=(14, 13)),
... ts_args={"lookback": 14})
>>> model.fit(data=busan_beach())
"""
kws = dict(
input_shape=input_shape,
hidden_units=hidden_units,
num_heads=num_heads,
dropout=dropout,
use_cudnn=use_cudnn,
num_outputs=num_outputs,
mode=mode,
output_activation=output_activation
)
if backend == "tf":
from ._tensorflow import TFT
return TFT(**kws)
else:
raise NotImplementedError
def check_backend(model:str, backend:str="tf")->None:
if backend=="tf":
try:
import tensorflow as tf
except Exception as e:
raise Exception(f"""
You must have installed tensorflow to use {model} model.
Importing tensorflow raised following error \n{e}""")
elif backend == "pytorch":
try:
import torch
except Exception as e:
raise Exception(f"""
You must have installed PyTorch to use {model} model.
Importing pytorch raised following error \n{e}""")
return | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/__init__.py | __init__.py |
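# Illustrative usage sketch (hypothetical, not part of the original module): check that the
# requested backend is importable before building a model configuration with a helper above.
# >>> check_backend("MLP", backend="tf")                       # raises if tensorflow is missing
# >>> mlp_config = MLP(32, num_layers=2, input_shape=(13,), backend="tf")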
__all__ = ["attn_layers"]
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
import math
from ai4water.backend import np, tf, keras
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
constraints = tf.keras.constraints
Layer = tf.keras.layers.Layer
layers = tf.keras.layers
K = tf.keras.backend
Dense = tf.keras.layers.Dense
Lambda = tf.keras.layers.Lambda
Activation = tf.keras.layers.Activation
Softmax = tf.keras.layers.Softmax
dot = tf.keras.layers.dot
concatenate = tf.keras.layers.concatenate
# A review of different attention mechanisms is given at following link
# https://lilianweng.github.io/lil-log/2018/06/24/attention-attention.html
# Raffel, SeqWeightedSelfAttention and HierarchicalAttention appear to be very much similar.
class BahdanauAttention(Layer):
"""
Also known as Additive attention.
https://github.com/thushv89/attention_keras/blob/master/src/layers/attention.py
This can be implemented in encoder-decoder model as shown here.
https://github.com/thushv89/attention_keras/blob/master/src/examples/nmt/model.py
This class implements Bahdanau attention (https://arxiv.org/pdf/1409.0473.pdf).
There are three sets of weights introduced W_a, U_a, and V_a
The original code had following MIT licence
----------------------------------------------------------
MIT License
Copyright (c) 2019 Thushan Ganegedara
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
----------------------------------------------------------
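    Example (illustrative sketch, not from the original source; assumes 3D encoder and decoder
    output tensors of shape (batch, steps, units)):
        >>> enc_out = tf.keras.layers.Input(shape=(20, 64))
        >>> dec_out = tf.keras.layers.Input(shape=(10, 64))
        >>> context_seq, attn_energies = BahdanauAttention()([enc_out, dec_out])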
"""
def __init__(self, **kwargs):
super(BahdanauAttention, self).__init__(**kwargs)
def build(self, input_shape):
assert isinstance(input_shape, list)
# Create a trainable weight variable for this layer.
self.W_a = self.add_weight(name='W_a',
shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),
initializer='uniform',
trainable=True)
self.U_a = self.add_weight(name='U_a',
shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),
initializer='uniform',
trainable=True)
self.V_a = self.add_weight(name='V_a',
shape=tf.TensorShape((input_shape[0][2], 1)),
initializer='uniform',
trainable=True)
super(BahdanauAttention, self).build(input_shape)
def __call__(self, inputs, verbose=False):
"""
inputs: [encoder_output_sequence, decoder_output_sequence]
"""
if not self.built:
self._maybe_build(inputs)
assert type(inputs) == list
encoder_out_seq, decoder_out_seq = inputs
if verbose:
print('encoder_out_seq>', encoder_out_seq.shape)
print('decoder_out_seq>', decoder_out_seq.shape)
def energy_step(inputs, states):
""" Step function for computing energy for a single decoder state """
assert_msg = "States must be a list. However states {} is of type {}".format(states, type(states))
assert isinstance(states, list) or isinstance(states, tuple), assert_msg
""" Some parameters required for shaping tensors"""
en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]
de_hidden = inputs.shape[-1]
""" Computing S.Wa where S=[s0, s1, ..., si]"""
# batch_size*en_seq_len, latent_dim
reshaped_enc_outputs = K.reshape(encoder_out_seq, (-1, en_hidden))
# batch_size*en_seq_len, latent_dim
W_a_dot_s = K.reshape(K.dot(reshaped_enc_outputs, self.W_a), (-1, en_seq_len, en_hidden))
if verbose:
print('wa.s>',W_a_dot_s.shape)
""" Computing hj.Ua """
# batch_size, 1, latent_dim
U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1)
if verbose:
print('Ua.h>',U_a_dot_h.shape)
""" tanh(S.Wa + hj.Ua) """
# batch_size*en_seq_len, latent_dim
reshaped_Ws_plus_Uh = K.tanh(K.reshape(W_a_dot_s + U_a_dot_h, (-1, en_hidden)))
if verbose:
print('Ws+Uh>', reshaped_Ws_plus_Uh.shape)
""" softmax(va.tanh(S.Wa + hj.Ua)) """
# batch_size, en_seq_len
e_i = K.reshape(K.dot(reshaped_Ws_plus_Uh, self.V_a), (-1, en_seq_len))
# batch_size, en_seq_len
e_i = tf.nn.softmax(e_i, name='softmax')
if verbose:
print('ei>', e_i.shape)
return e_i, [e_i]
def context_step(inputs, states):
""" Step function for computing ci using ei """
# batch_size, hidden_size
c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)
if verbose:
print('ci>', c_i.shape)
return c_i, [c_i]
def create_initial_state(inputs, hidden_size):
# We are not using initial states, but need to pass something to the K.rnn function
fake_state = K.zeros_like(inputs) # <= (batch_size, enc_seq_len, latent_dim)
fake_state = K.sum(fake_state, axis=[1, 2]) # <= (batch_size)
fake_state = K.expand_dims(fake_state) # <= (batch_size, 1)
fake_state = K.tile(fake_state, [1, hidden_size]) # <= (batch_size, latent_dim)
return fake_state
fake_state_c = create_initial_state(encoder_out_seq, encoder_out_seq.shape[-1])
fake_state_e = create_initial_state(encoder_out_seq, encoder_out_seq.shape[1]) # <= (batch_size, enc_seq_len)
""" Computing energy outputs """
# e_outputs => (batch_size, de_seq_len, en_seq_len)
last_out, e_outputs, _ = K.rnn(
energy_step, decoder_out_seq, [fake_state_e],
)
""" Computing context vectors """
last_out, c_outputs, _ = K.rnn(
context_step, e_outputs, [fake_state_c],
)
return c_outputs, e_outputs
def compute_output_shape(self, input_shape):
""" Outputs produced by the layer """
return [
tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),
tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))
]
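# A minimal usage sketch of BahdanauAttention; the batch size and dimensions
# below are assumed values, not requirements of the layer.
# >>> import tensorflow as tf
# >>> enc_out = tf.random.normal((4, 20, 32))   # (batch, enc_seq_len, enc_hidden)
# >>> dec_out = tf.random.normal((4, 15, 32))   # (batch, dec_seq_len, dec_hidden)
# >>> context, energies = BahdanauAttention()([enc_out, dec_out])
# >>> # context -> (4, 15, 32), energies -> (4, 15, 20)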
def dot_product(x, kernel):
"""
Wrapper for dot product operation, in order to be compatible with both
Theano and Tensorflow
Args:
x (): input
kernel (): weights
Returns:
"""
if K.backend() == 'tensorflow':
return K.squeeze(K.dot(x, K.expand_dims(kernel)), axis=-1)
else:
return K.dot(x, kernel)
class HierarchicalAttention(Layer):
"""
Used from https://gist.github.com/cbaziotis/7ef97ccf71cbc14366835198c09809d2
Attention operation, with a context/query vector, for temporal data.
Supports Masking.
Follows the work of Yang et al., 2016 [https://www.cs.cmu.edu/~./hovy/papers/16HLT-hierarchical-attention-networks.pdf]
"Hierarchical Attention Networks for Document Classification"
by using a context vector to assist the attention
# Input shape
3D tensor with shape: `(samples, steps, features)`.
# Output shape
2D tensor with shape: `(samples, features)`.
How to use:
Just put it on top of an RNN Layer (GRU/LSTM/SimpleRNN) with return_sequences=True.
The dimensions are inferred based on the output shape of the RNN.
Note: The layer has been tested with Keras 2.0.6
Example:
model.add(LSTM(64, return_sequences=True))
model.add(HierarchicalAttention())
# next add a Dense layer (for classification/regression) or whatever...
"""
def __init__(self,
W_regularizer=None, u_regularizer=None, b_regularizer=None,
W_constraint=None, u_constraint=None, b_constraint=None,
bias=True, **kwargs):
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.u_regularizer = regularizers.get(u_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.u_constraint = constraints.get(u_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(HierarchicalAttention, self).__init__(**kwargs)
def build(self, input_shape):
assert len(input_shape) == 3
self.W = self.add_weight(shape=(input_shape[-1], input_shape[-1],),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight(shape=(input_shape[-1],),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
self.u = self.add_weight(shape=(input_shape[-1],),
initializer=self.init,
name='{}_u'.format(self.name),
regularizer=self.u_regularizer,
constraint=self.u_constraint)
super(HierarchicalAttention, self).build(input_shape)
def compute_mask(self, input, input_mask=None):
# do not pass the mask to the next layers
return None
def __call__(self, x, mask=None):
# TODO different result when using call()
if not self.built:
self._maybe_build(x)
uit = dot_product(x, self.W) # eq 5 in paper
if self.bias:
uit += self.b
uit = K.tanh(uit) # eq 5 in paper
ait = dot_product(uit, self.u)
a = K.exp(ait)
# apply mask after the exp. will be re-normalized next
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
a *= K.cast(mask, K.floatx())
# in some cases especially in the early stages of training the sum may be almost zero
# and this results in NaN's. A workaround is to add a very small positive number ε to the sum.
# a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
# eq 6 in paper
a /= math_ops.cast(math_ops.reduce_sum(a, axis=1, keepdims=True, name="HA_sum") + K.epsilon(), K.floatx(),
name="HA_cast")
a = array_ops.expand_dims(a, axis=-1, name="HA_expand_dims")
weighted_input = tf.math.multiply(x,a,name="HA_weighted_input") #x * a
return K.sum(weighted_input, axis=1) # eq 7 in paper
def compute_output_shape(self, input_shape):
return input_shape[0], input_shape[-1]
class SeqSelfAttention(Layer):
ATTENTION_TYPE_ADD = 'additive'
ATTENTION_TYPE_MUL = 'multiplicative'
def __init__(self,
units=32,
attention_width=None,
attention_type=ATTENTION_TYPE_ADD,
return_attention=False,
history_only=False,
kernel_initializer='glorot_normal',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
use_additive_bias=True,
use_attention_bias=True,
attention_activation=None,
attention_regularizer_weight=0.0,
**kwargs):
"""
using implementation of
https://github.com/CyberZHG/keras-self-attention/blob/master/keras_self_attention/seq_self_attention.py
Layer initialization.
For additive attention, see: https://arxiv.org/pdf/1806.01264.pdf
:param units: The dimension of the vectors that are used to calculate the attention weights.
:param attention_width: The width of local attention.
:param attention_type: 'additive' or 'multiplicative'.
:param return_attention: Whether to return the attention weights for visualization.
:param history_only: Only use historical pieces of data.
:param kernel_initializer: The initializer for weight matrices.
:param bias_initializer: The initializer for biases.
:param kernel_regularizer: The regularization for weight matrices.
:param bias_regularizer: The regularization for biases.
:param kernel_constraint: The constraint for weight matrices.
:param bias_constraint: The constraint for biases.
:param use_additive_bias: Whether to use bias while calculating the relevance of input features
in additive mode.
:param use_attention_bias: Whether to use bias while calculating the weights of attention.
:param attention_activation: The activation used for calculating the weights of attention.
:param attention_regularizer_weight: The weight of the attention regularizer.
:param kwargs: Parameters for parent class.
The original code which has here been slightly modified came with following licence.
----------------------------------
MIT License
Copyright (c) 2018 PoW
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-----------------------------------------------------------------------
"""
super(SeqSelfAttention, self).__init__(**kwargs)
self.supports_masking = True
self.units = units
self.attention_width = attention_width
self.attention_type = attention_type
self.return_attention = return_attention
self.history_only = history_only
if history_only and attention_width is None:
self.attention_width = int(1e9)
self.use_additive_bias = use_additive_bias
self.use_attention_bias = use_attention_bias
self.kernel_initializer = keras.initializers.get(kernel_initializer)
self.bias_initializer = keras.initializers.get(bias_initializer)
self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)
self.bias_regularizer = keras.regularizers.get(bias_regularizer)
self.kernel_constraint = keras.constraints.get(kernel_constraint)
self.bias_constraint = keras.constraints.get(bias_constraint)
self.attention_activation = keras.activations.get(attention_activation)
self.attention_regularizer_weight = attention_regularizer_weight
self._backend = keras.backend.backend()
if attention_type.upper().startswith('ADD'):
self.Wx, self.Wt, self.bh = None, None, None
self.Wa, self.ba = None, None
elif attention_type.upper().startswith('MUL'):
self.Wa, self.ba = None, None
else:
raise NotImplementedError('No implementation for attention type : ' + attention_type)
def get_config(self):
config = {
'units': self.units,
'attention_width': self.attention_width,
'attention_type': self.attention_type,
'return_attention': self.return_attention,
'history_only': self.history_only,
'use_additive_bias': self.use_additive_bias,
'use_attention_bias': self.use_attention_bias,
'kernel_initializer': keras.initializers.serialize(self.kernel_initializer),
'bias_initializer': keras.initializers.serialize(self.bias_initializer),
'kernel_regularizer': keras.regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': keras.regularizers.serialize(self.bias_regularizer),
'kernel_constraint': keras.constraints.serialize(self.kernel_constraint),
'bias_constraint': keras.constraints.serialize(self.bias_constraint),
'attention_activation': keras.activations.serialize(self.attention_activation),
'attention_regularizer_weight': self.attention_regularizer_weight,
}
base_config = super(SeqSelfAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
if self.attention_type.upper().startswith('ADD'):
self._build_additive_attention(input_shape)
elif self.attention_type.upper().startswith('MUL'):
self._build_multiplicative_attention(input_shape)
super(SeqSelfAttention, self).build(input_shape)
def _build_additive_attention(self, input_shape):
feature_dim = int(input_shape[2])
self.Wt = self.add_weight(shape=(feature_dim, self.units),
name='{}_Add_Wt'.format(self.name),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.Wx = self.add_weight(shape=(feature_dim, self.units),
name='{}_Add_Wx'.format(self.name),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_additive_bias:
self.bh = self.add_weight(shape=(self.units,),
name='{}_Add_bh'.format(self.name),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
self.Wa = self.add_weight(shape=(self.units, 1),
name='{}_Add_Wa'.format(self.name),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_attention_bias:
self.ba = self.add_weight(shape=(1,),
name='{}_Add_ba'.format(self.name),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
def _build_multiplicative_attention(self, input_shape):
feature_dim = int(input_shape[2])
self.Wa = self.add_weight(shape=(feature_dim, feature_dim),
name='{}_Mul_Wa'.format(self.name),
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_attention_bias:
self.ba = self.add_weight(shape=(1,),
name='{}_Mul_ba'.format(self.name),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
def __call__(self, inputs, mask=None, **kwargs):
# TODO different result when using call()
if not self.built:
self._maybe_build(inputs)
input_len = K.shape(inputs)[1]
if self.attention_type.upper().startswith('ADD'):
e = self._call_additive_emission(inputs)
else:
e = self._call_multiplicative_emission(inputs)
if self.attention_activation is not None:
e = self.attention_activation(e)
if self.attention_width is not None:
if self.history_only:
lower = K.arange(0, input_len) - (self.attention_width - 1)
else:
lower = K.arange(0, input_len) - self.attention_width // 2
lower = K.expand_dims(lower, axis=-1)
upper = lower + self.attention_width
indices = K.expand_dims(K.arange(0, input_len), axis=0)
e -= 10000.0 * (1.0 - K.cast(lower <= indices, K.floatx()) * K.cast(indices < upper, K.floatx()))
if mask is not None:
mask = K.expand_dims(K.cast(mask, K.floatx()), axis=-1)
e -= 10000.0 * ((1.0 - mask) * (1.0 - K.permute_dimensions(mask, (0, 2, 1))))
# a_{t} = \text{softmax}(e_t)
a = Softmax(axis=-1, name='SeqSelfAttention_Softmax')(e)
# l_t = \sum_{t'} a_{t, t'} x_{t'}
v = K.batch_dot(a, inputs)
if self.attention_regularizer_weight > 0.0:
self.add_loss(self._attention_regularizer(a))
if self.return_attention:
return [v, a]
return v
def _call_additive_emission(self, inputs):
input_shape = K.shape(inputs)
batch_size, input_len = input_shape[0], input_shape[1]
# h_{t, t'} = \tanh(x_t^T W_t + x_{t'}^T W_x + b_h)
q = K.expand_dims(K.dot(inputs, self.Wt), 2)
k = K.expand_dims(K.dot(inputs, self.Wx), 1)
if self.use_additive_bias:
h = K.tanh(q + k + self.bh)
else:
h = K.tanh(q + k)
# e_{t, t'} = W_a h_{t, t'} + b_a
if self.use_attention_bias:
e = K.reshape(K.dot(h, self.Wa) + self.ba, (batch_size, input_len, input_len))
else:
e = K.reshape(K.dot(h, self.Wa), (batch_size, input_len, input_len))
return e
def _call_multiplicative_emission(self, inputs):
# e_{t, t'} = x_t^T W_a x_{t'} + b_a
e = K.batch_dot(K.dot(inputs, self.Wa), K.permute_dimensions(inputs, (0, 2, 1)))
if self.use_attention_bias:
e += self.ba[0]
return e
def compute_output_shape(self, input_shape):
output_shape = input_shape
if self.return_attention:
attention_shape = (input_shape[0], output_shape[1], input_shape[1])
return [output_shape, attention_shape]
return output_shape
def compute_mask(self, inputs, mask=None):
if self.return_attention:
return [mask, None]
return mask
def _attention_regularizer(self, attention):
batch_size = K.cast(K.shape(attention)[0], K.floatx())
input_len = K.shape(attention)[-1]
indices = K.expand_dims(K.arange(0, input_len), axis=0)
diagonal = K.expand_dims(K.arange(0, input_len), axis=-1)
eye = K.cast(K.equal(indices, diagonal), K.floatx())
return self.attention_regularizer_weight * K.sum(K.square(K.batch_dot(
attention,
K.permute_dimensions(attention, (0, 2, 1))) - eye)) / batch_size
@staticmethod
def get_custom_objects():
return {'SeqSelfAttention': SeqSelfAttention}
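# A minimal usage sketch of SeqSelfAttention on top of a recurrent output;
# the shapes and number of units below are assumed values.
# >>> import tensorflow as tf
# >>> seq = tf.random.normal((4, 10, 16))                      # (batch, steps, features)
# >>> out, weights = SeqSelfAttention(units=32, return_attention=True)(seq)
# >>> # out -> (4, 10, 16), weights -> (4, 10, 10)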
class SeqWeightedAttention(keras.layers.Layer):
r"""Y = \text{softmax}(XW + b) X
See: https://arxiv.org/pdf/1708.00524.pdf
using implementation of https://github.com/CyberZHG/keras-self-attention/blob/master/keras_self_attention/seq_weighted_attention.py
The original code which has here been slightly modified came with following licence.
-----------------------------------------------------------------------
MIT License
Copyright (c) 2018 PoW
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
-----------------------------------------------------------------------
"""
def __init__(self, use_bias=True, return_attention=False, **kwargs):
super(SeqWeightedAttention, self).__init__(**kwargs)
self.supports_masking = True
self.use_bias = use_bias
self.return_attention = return_attention
self.W, self.b = None, None
def get_config(self):
config = {
'use_bias': self.use_bias,
'return_attention': self.return_attention,
}
base_config = super(SeqWeightedAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape):
self.W = self.add_weight(shape=(int(input_shape[2]), 1),
name='{}_W'.format(self.name),
initializer=keras.initializers.get('uniform'))
if self.use_bias:
self.b = self.add_weight(shape=(1,),
name='{}_b'.format(self.name),
initializer=keras.initializers.get('zeros'))
super(SeqWeightedAttention, self).build(input_shape)
def __call__(self, x, mask=None):
# TODO different result when using call()
if not self.built:
self._maybe_build(x)
logits = K.dot(x, self.W)
if self.use_bias:
logits += self.b
x_shape = K.shape(x)
logits = K.reshape(logits, (x_shape[0], x_shape[1]))
if mask is not None:
mask = K.cast(mask, K.floatx())
logits -= 10000.0 * (1.0 - mask)
ai = math_ops.exp(logits - K.max(logits, axis=-1, keepdims=True), name="SeqWeightedAttention_exp")
#att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
att_weights = tf.math.divide(ai, (math_ops.reduce_sum(ai, axis=1, keepdims=True,
name="SeqWeightedAttention_sum") + K.epsilon()),
name="SeqWeightedAttention_weights")
weighted_input = x * K.expand_dims(att_weights)
result = K.sum(weighted_input, axis=1)
if self.return_attention:
return [result, att_weights]
return result
def compute_output_shape(self, input_shape):
output_len = input_shape[2]
if self.return_attention:
return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
return input_shape[0], output_len
def compute_mask(self, _, input_mask=None):
if self.return_attention:
return [None, None]
return None
@staticmethod
def get_custom_objects():
return {'SeqWeightedAttention': SeqWeightedAttention}
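# A minimal usage sketch of SeqWeightedAttention, which collapses a sequence
# into a single vector; the shapes below are assumed values.
# >>> import tensorflow as tf
# >>> seq = tf.random.normal((4, 10, 16))                      # (batch, steps, features)
# >>> vec, weights = SeqWeightedAttention(return_attention=True)(seq)
# >>> # vec -> (4, 16), weights -> (4, 10)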
class SnailAttention(Layer):
"""
Based on work of Mishra et al., 2018 https://openreview.net/pdf?id=B1DmUzWAW
Adopting code from https://github.com/philipperemy/keras-snail-attention/blob/master/attention.py
"""
def __init__(self, dims, k_size, v_size, seq_len=None, **kwargs):
self.k_size = k_size
self.seq_len = seq_len
self.v_size = v_size
self.dims = dims
self.sqrt_k = math.sqrt(k_size)
self.keys_fc = None
self.queries_fc = None
self.values_fc = None
super(SnailAttention, self).__init__(**kwargs)
def build(self, input_shape):
# https://stackoverflow.com/questions/54194724/how-to-use-keras-layers-in-custom-keras-layer
self.keys_fc = Dense(self.k_size, name="Keys_SnailAttn")
self.keys_fc.build((None, self.dims))
self._trainable_weights.extend(self.keys_fc.trainable_weights)
self.queries_fc = Dense(self.k_size, name="Queries_SnailAttn")
self.queries_fc.build((None, self.dims))
self._trainable_weights.extend(self.queries_fc.trainable_weights)
self.values_fc = Dense(self.v_size, name="Values_SnailAttn")
self.values_fc.build((None, self.dims))
self._trainable_weights.extend(self.values_fc.trainable_weights)
#super(SnailAttention, self).__init__(**kwargs)
def __call__(self, inputs, **kwargs):
if not self.built:
self._maybe_build(inputs)
# check that the implementation matches PyTorch exactly.
keys = self.keys_fc(inputs)
queries = self.queries_fc(inputs)
values = self.values_fc(inputs)
logits = K.batch_dot(queries, K.permute_dimensions(keys, (0, 2, 1)))
mask = K.ones_like(logits) * np.triu((-np.inf) * np.ones(logits.shape.as_list()[1:]), k=1)
logits = mask + logits
probs = Softmax(axis=-1, name="Softmax_SnailAttn")(logits / self.sqrt_k)
read = K.batch_dot(probs, values)
output = K.concatenate([inputs, read], axis=-1)
return output
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
output_shape[-1] += self.v_size
return tuple(output_shape)
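# A minimal usage sketch of SnailAttention; `dims` must match the feature size
# of the input, while the key/value sizes below are assumed values.
# >>> import tensorflow as tf
# >>> seq = tf.random.normal((4, 10, 16))                      # (batch, steps, dims)
# >>> out = SnailAttention(dims=16, k_size=8, v_size=8)(seq)
# >>> # out -> (4, 10, 24), i.e. the read vectors concatenated to the inputs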
def regularized_padded_conv(conv_dim, *args, **kwargs):
if conv_dim == "1d":
return layers.Conv1D(*args, **kwargs, padding='same', use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(5e-4))
elif conv_dim == "2d":
return layers.Conv2D(*args, **kwargs, padding='same', use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(5e-4))
else:
raise ValueError(f"conv_dim must be either 1d or 2d but it is {conv_dim}")
class ChannelAttention(layers.Layer):
"""Code adopted from https://github.com/zhangkaifang/CBAM-TensorFlow2.0.
This feature attention generates time step context descriptors self.avg and self.max by using both average and
max pooling operations along the time step axis and then forwards them to a shared multi-layer perceptron (MLP) to produce
the feature (channel) attention map."""
def __init__(self, conv_dim, in_planes, ratio=16, **kwargs):
if conv_dim not in ["1d", "2d"]:
raise ValueError(f" conv_dim must be either 1d or 2d but it is {conv_dim}")
super(ChannelAttention, self).__init__(**kwargs)
if conv_dim == "1d":
self.axis = (1,)
self.avg= layers.GlobalAveragePooling1D()
self.max= layers.GlobalMaxPooling1D()
self.conv1 = layers.Conv1D(in_planes//ratio, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu, name='channel_attn1')
self.conv2 = layers.Conv1D(in_planes, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True, name='channel_attn2')
elif conv_dim == "2d":
self.axis = (1,1)
self.avg= layers.GlobalAveragePooling2D()
self.max= layers.GlobalMaxPooling2D()
self.conv1 = layers.Conv2D(in_planes//ratio, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu, name='channel_attn1')
self.conv2 = layers.Conv2D(in_planes, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True, name='channel_attn2')
def __call__(self, inputs, *args):
avg = self.avg(inputs) # [256, 32, 32, 64] -> [256, 64]
max_pool = self.max(inputs) # [256, 32, 32, 64] -> [256, 64]
avg = layers.Reshape((*self.axis, avg.shape[1]))(avg) # shape (None, 1, 1 feature) # [256, 1, 1, 64]
max_pool = layers.Reshape((*self.axis, max_pool.shape[1]))(max_pool) # shape (None, 1, 1 feature) # [256, 1, 1, 64]
avg_out = self.conv2(self.conv1(avg)) # [256, 1, 1, 64] -> [256, 1, 1, 4] -> [256, 1, 1, 64]
max_out = self.conv2(self.conv1(max_pool)) # [256, 1, 1, 64] -> [256, 1, 1, 4] -> [256, 1, 1, 64]
out = avg_out + max_out # [256, 1, 1, 64]
out = tf.nn.sigmoid(out, name="ChannelAttention_sigmoid") # [256, 1, 1, 64]
return out
class SpatialAttention(layers.Layer):
"""Code adopted from https://github.com/zhangkaifang/CBAM-TensorFlow2.0 .
The time step (spatial) attention module generates a concatenated feature
descriptor [F'^T_avg ; F'^T_max] ∈ R^(2×T) by applying average pooling and max pooling
along the feature axis, followed by a standard convolution layer [6].
.. [6] Cheng, Y., Liu, Z., & Morimoto, Y. (2020). Attention-Based SeriesNet:
An Attention-Based Hybrid Neural Network Model
for Conditional Time Series Forecasting. Information, 11(6), 305.
"""
def __init__(self, conv_dim, kernel_size=7, **kwargs):
if conv_dim not in ["1d", "2d"]:
raise ValueError(f" conv_dim must be either 1d or 2d but it is {conv_dim}")
super(SpatialAttention, self).__init__(**kwargs)
if conv_dim == "1d":
self.axis = 2
elif conv_dim == "2d":
self.axis = 3
self.conv1 = regularized_padded_conv(conv_dim,
1, kernel_size=kernel_size, strides=1, activation=tf.nn.sigmoid, name="spatial_attn")
def __call__(self, inputs, *args):
avg_out = tf.reduce_mean(inputs, axis=self.axis, name="SpatialAttention_mean") # [256, 32, 32, 64] -> [256, 32, 32]
max_out = tf.reduce_max(inputs, axis=self.axis, name="SpatialAttention_max") # [256, 32, 32, 64] -> [256, 32, 32]
out = tf.stack([avg_out, max_out], axis=self.axis, name="SpatialAttention_stack") # concat -> [256, 32, 32, 2]
out = self.conv1(out) # -> [256, 32, 32, 1]
return out
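# A minimal CBAM-style usage sketch combining ChannelAttention and SpatialAttention
# on 1d (time series) inputs; the shapes and channel count below are assumed values.
# >>> import tensorflow as tf
# >>> x = tf.random.normal((4, 10, 64))                        # (batch, steps, channels)
# >>> x = ChannelAttention("1d", in_planes=64)(x) * x          # refine the feature (channel) axis
# >>> x = SpatialAttention("1d")(x) * x                        # refine the time-step axis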
class attn_layers(object):
SeqSelfAttention = SeqSelfAttention
SnailAttention = SnailAttention
SeqWeightedAttention = SeqWeightedAttention
BahdanauAttention = BahdanauAttention
HierarchicalAttention = HierarchicalAttention
SpatialAttention = SpatialAttention
ChannelAttention = ChannelAttention | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_tensorflow/attention_layers.py | attention_layers.py |
from typing import Union
from ai4water.backend import tf
layers = tf.keras.layers
Dense = tf.keras.layers.Dense
Layer = tf.keras.layers.Layer
activations = tf.keras.activations
K = tf.keras.backend
constraints = tf.keras.constraints
initializers = tf.keras.initializers
regularizers = tf.keras.regularizers
from tensorflow.python.ops import array_ops
from .attention_layers import ChannelAttention, SpatialAttention, regularized_padded_conv
def _get_tensor_shape(t):
return t.shape
class ConditionalRNN(tf.keras.layers.Layer):
# Arguments to the RNN like return_sequences, return_state...
def __init__(self, units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
dropout=0.0,
recurrent_dropout=0.0,
kernel_regularizer=None,
recurrent_regularizer=None,
cell=tf.keras.layers.LSTMCell, *args,
**kwargs):
"""
Conditional RNN. Conditions time series on categorical data.
:param units: int, The number of units in the RNN Cell
:param cell: string, cell class or object (pre-instantiated). In the case of string, 'GRU',
'LSTM' and 'RNN' are supported.
:param args: Any parameters of the tf.keras.layers.RNN class, such as return_sequences,
return_state, stateful, unroll...
"""
super().__init__()
self.units = units
self.final_states = None
self.init_state = None
if isinstance(cell, str):
if cell.upper() == 'GRU':
cell = tf.keras.layers.GRUCell
elif cell.upper() == 'LSTM':
cell = tf.keras.layers.LSTMCell
elif cell.upper() == 'RNN':
cell = tf.keras.layers.SimpleRNNCell
else:
raise Exception('Only GRU, LSTM and RNN are supported as cells.')
self._cell = cell if hasattr(cell, 'units') else cell(units=units,
activation=activation,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
recurrent_activation=recurrent_activation,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
use_bias=use_bias
)
self.rnn = tf.keras.layers.RNN(cell=self._cell, *args, **kwargs)
# single cond
self.cond_to_init_state_dense_1 = tf.keras.layers.Dense(units=self.units)
# multi cond
max_num_conditions = 10
self.multi_cond_to_init_state_dense = []
for _ in range(max_num_conditions):
self.multi_cond_to_init_state_dense.append(tf.keras.layers.Dense(units=self.units))
self.multi_cond_p = tf.keras.layers.Dense(1, activation=None, use_bias=True)
def _standardize_condition(self, initial_cond):
initial_cond_shape = initial_cond.shape
if len(initial_cond_shape) == 2:
initial_cond = tf.expand_dims(initial_cond, axis=0)
first_cond_dim = initial_cond.shape[0]
if isinstance(self._cell, tf.keras.layers.LSTMCell):
if first_cond_dim == 1:
initial_cond = tf.tile(initial_cond, [2, 1, 1])
elif first_cond_dim != 2:
raise Exception('Initial cond should have shape: [2, batch_size, hidden_size] '
'or [batch_size, hidden_size]. Shapes do not match.', initial_cond_shape)
elif isinstance(self._cell, tf.keras.layers.GRUCell) or isinstance(self._cell, tf.keras.layers.SimpleRNNCell):
if first_cond_dim != 1:
raise Exception('Initial cond should have shape: [1, batch_size, hidden_size] '
'or [batch_size, hidden_size]. Shapes do not match.', initial_cond_shape)
else:
raise Exception('Only GRU, LSTM and RNN are supported as cells.')
return initial_cond
def __call__(self, inputs, *args, **kwargs):
"""
:param inputs: List of n elements:
- [0] 3-D Tensor with shape [batch_size, time_steps, input_dim]. The inputs.
- [1:] list of tensors with shape [batch_size, cond_dim]. The conditions.
In the case of a list, the tensors can have a different cond_dim.
:return: outputs, states or outputs (if return_state=False)
"""
assert (isinstance(inputs, list) or isinstance(inputs, tuple)) and len(inputs) >= 2, f"{type(inputs)}"
x = inputs[0]
cond = inputs[1:]
if len(cond) > 1: # multiple conditions.
init_state_list = []
for ii, c in enumerate(cond):
init_state_list.append(self.multi_cond_to_init_state_dense[ii](self._standardize_condition(c)))
multi_cond_state = self.multi_cond_p(tf.stack(init_state_list, axis=-1))
multi_cond_state = tf.squeeze(multi_cond_state, axis=-1)
self.init_state = tf.unstack(multi_cond_state, axis=0)
else:
cond = self._standardize_condition(cond[0])
if cond is not None:
self.init_state = self.cond_to_init_state_dense_1(cond)
self.init_state = tf.unstack(self.init_state, axis=0)
out = self.rnn(x, initial_state=self.init_state, *args, **kwargs)
if self.rnn.return_state:
outputs, h, c = out
final_states = tf.stack([h, c])
return outputs, final_states
else:
return out
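# A minimal usage sketch of ConditionalRNN; the shapes below are assumed values.
# >>> import tensorflow as tf
# >>> x = tf.random.normal((8, 10, 3))        # (batch_size, time_steps, input_dim)
# >>> cond = tf.random.normal((8, 5))         # (batch_size, cond_dim)
# >>> out = ConditionalRNN(16, cell="LSTM")([x, cond])         # -> (8, 16)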
class BasicBlock(layers.Layer):
"""
The official implementation is at https://github.com/Jongchan/attention-module/blob/master/MODELS/cbam.py
The implementation of [1] does not have the two conv and bn pairs. It just applies channel attention followed by
spatial attention on the inputs.
[1] https://github.com/kobiso/CBAM-tensorflow/blob/master/attention_module.py#L39
"""
expansion = 1
def __init__(self, conv_dim, out_channels=32, stride=1, **kwargs):
super(BasicBlock, self).__init__(**kwargs)
# 1. BasicBlock contains two convolutions in total; this is the first convolution layer of the block.
self.conv1 = regularized_padded_conv(conv_dim, out_channels, kernel_size=3, strides=stride)
self.bn1 = layers.BatchNormalization()
# 2. The second convolution; if the first convolution uses a stride it already downsamples, so no downsampling is done here. This block keeps the size unchanged by fixing stride to 1.
self.conv2 = regularized_padded_conv(conv_dim, out_channels, kernel_size=3, strides=1)
self.bn2 = layers.BatchNormalization()
# ############################## attention mechanism ###############################
self.ca = ChannelAttention(conv_dim=conv_dim, in_planes=out_channels)
self.sa = SpatialAttention(conv_dim=conv_dim)
# # 3. Check whether stride equals 1; if it is 1, there is no downsampling.
# if stride != 1 or in_channels != self.expansion * out_channels:
# self.shortcut = Sequential([regularized_padded_conv(self.expansion * out_channels,
# kernel_size=1, strides=stride),
# layers.BatchNormalization()])
# else:
# self.shortcut = lambda x, _: x
def call(self, inputs, training=False):
out = self.conv1(inputs)
out = self.bn1(out, training=training)
out = tf.nn.relu(out)
out = self.conv2(out)
out = self.bn2(out, training=training)
# ############################## attention mechanism ###############################
out = self.ca(out) * out
out = self.sa(out) * out
# out = out + self.shortcut(inputs, training)
# out = tf.nn.relu(out)
return out
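# A minimal usage sketch of BasicBlock (a CBAM-style convolutional block) on 1d
# inputs; the shapes and channel count below are assumed values.
# >>> import tensorflow as tf
# >>> x = tf.random.normal((4, 10, 8))                         # (batch, steps, features)
# >>> out = BasicBlock("1d", out_channels=32)(x, training=False)   # -> (4, 10, 32)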
class scaled_dot_product_attention(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __call__(self, q, k, v, mask):
"""Calculate the attention weights.
q, k, v must have matching leading dimensions.
k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
The mask has different shapes depending on its type(padding or look ahead)
but it must be broadcastable for addition.
Args:
q: query shape == (..., seq_len_q, depth)
k: key shape == (..., seq_len_k, depth)
v: value shape == (..., seq_len_v, depth_v)
mask: Float tensor with shape broadcastable
to (..., seq_len_q, seq_len_k). Defaults to None.
Returns:
output, attention_weights
"""
matmul_qk = tf.matmul(q, k, transpose_b=True) # (..., seq_len_q, seq_len_k)
# scale matmul_qk
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
# add the mask to the scaled tensor.
if mask is not None:
scaled_attention_logits += (mask * -1e9)
# softmax is normalized on the last axis (seq_len_k) so that the scores
# add up to 1.
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1, name='scaled_dot_prod_attn_weights') # (..., seq_len_q, seq_len_k)
output = tf.matmul(attention_weights, v, name='scaled_dot_prod_attn_outs') # (..., seq_len_q, depth_v)
return output, attention_weights
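# A minimal usage sketch of scaled_dot_product_attention; the leading dimensions
# (batch, heads) and depths below are assumed values.
# >>> import tensorflow as tf
# >>> q = tf.random.normal((2, 4, 6, 8))      # (..., seq_len_q, depth)
# >>> k = tf.random.normal((2, 4, 7, 8))      # (..., seq_len_k, depth)
# >>> v = tf.random.normal((2, 4, 7, 16))     # (..., seq_len_v, depth_v)
# >>> out, weights = scaled_dot_product_attention()(q, k, v, mask=None)
# >>> # out -> (2, 4, 6, 16), weights -> (2, 4, 6, 7)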
MHW_COUNTER = 0
ENC_COUNTER = 0
class MultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, **kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
global MHW_COUNTER
MHW_COUNTER += 1
self.depth = d_model // self.num_heads
self.wq = tf.keras.layers.Dense(d_model, name=f"wq_{MHW_COUNTER}")
self.wk = tf.keras.layers.Dense(d_model, name=f"wk_{MHW_COUNTER}")
self.wv = tf.keras.layers.Dense(d_model, name=f"wv_{MHW_COUNTER}")
self.dense = tf.keras.layers.Dense(d_model)
def split_heads(self, x, batch_size):
"""Split the last dimension into (num_heads, depth).
Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
"""
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def __call__(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # (batch_size, seq_len, d_model)
k = self.wk(k) # (batch_size, seq_len, d_model)
v = self.wv(v) # (batch_size, seq_len, d_model)
q = self.split_heads(q, batch_size) # (batch_size, num_heads, seq_len_q, depth)
k = self.split_heads(k, batch_size) # (batch_size, num_heads, seq_len_k, depth)
v = self.split_heads(v, batch_size) # (batch_size, num_heads, seq_len_v, depth)
# scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
# attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
scaled_attention, attention_weights = scaled_dot_product_attention()(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention,
perm=[0, 2, 1, 3]) # (batch_size, seq_len_q, num_heads, depth)
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)
output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)
return output, attention_weights
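# A minimal self-attention usage sketch of MultiHeadAttention; d_model must be
# divisible by num_heads, and the sizes below are assumed values.
# >>> import tensorflow as tf
# >>> x = tf.random.normal((2, 10, 32))                        # (batch, seq_len, d_model)
# >>> out, attn = MultiHeadAttention(d_model=32, num_heads=4)(x, x, x, mask=None)
# >>> # out -> (2, 10, 32), attn -> (2, 4, 10, 10)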
def point_wise_feed_forward_network(d_model, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='swish', name='swished_dense'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(d_model, name='ffn_output') # (batch_size, seq_len, d_model)
])
class EncoderLayer(tf.keras.layers.Layer):
def __init__(self, d_model, num_heads, dff, rate=0.1, **kwargs):
super(EncoderLayer, self).__init__(**kwargs)
global MHW_COUNTER
MHW_COUNTER += 1
self.mha = MultiHeadAttention(d_model, num_heads)
# self.ffn = point_wise_feed_forward_network(d_model, dff)
self.swished_dense = layers.Dense(dff, activation='swish', name=f'swished_dense_{MHW_COUNTER}')
self.ffn_output = layers.Dense(d_model, name=f'ffn_output_{MHW_COUNTER}')
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def __call__(self, x, training=True, mask=None):
attn_output, attn_weights = self.mha(x, x, x, mask) # (batch_size, input_seq_len, d_model)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output) # (batch_size, input_seq_len, d_model)
# ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)
temp = self.swished_dense(out1)
ffn_output = self.ffn_output(temp)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output) # (batch_size, input_seq_len, d_model)
return out2, attn_weights
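# A minimal usage sketch of EncoderLayer (self-attention plus feed-forward
# sub-block); the sizes below are assumed values.
# >>> import tensorflow as tf
# >>> x = tf.random.normal((2, 10, 32))                        # (batch, seq_len, d_model)
# >>> out, attn = EncoderLayer(d_model=32, num_heads=4, dff=64)(x, training=False)
# >>> # out -> (2, 10, 32)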
class TransformerBlocks(tf.keras.layers.Layer):
"""
This layer stacks Transformers on top of each other.
Example
-------
>>> import numpy as np
>>> from tensorflow.keras.models import Model
>>> from tensorflow.keras.layers import Input, Dense
>>> from ai4water.models._tensorflow import TransformerBlocks
>>> inp = Input(shape=(10, 32))
>>> out, _ = TransformerBlocks(4, 4, 32)(inp)
>>> out = Dense(1)(out)
>>> model = Model(inputs=inp, outputs=out)
>>> model.compile(optimizer="Adam", loss="mse")
>>> x = np.random.random((100, 10, 32))
>>> y = np.random.random(100)
>>> h = model.fit(x,y)
"""
def __init__(
self,
num_blocks:int,
num_heads:int,
embed_dim:int,
name:str = "TransformerBlocks",
**kwargs
):
"""
Parameters
-----------
num_blocks : int
num_heads : int
embed_dim : int
**kwargs :
additional keyword arguments for :class:`ai4water.models.tensorflow.Transformer`
"""
super(TransformerBlocks, self).__init__(name=name)
self.num_blocks = num_blocks
self.num_heads = num_heads
self.embed_dim = embed_dim
self.blocks = []
for n in range(num_blocks):
self.blocks.append(Transformer(num_heads, embed_dim, **kwargs))
def get_config(self)->dict:
config = {
"num_blocks": self.num_blocks,
"num_heads": self.num_heads,
"embed_dim": self.embed_dim
}
return config
def __call__(self, inputs, *args, **kwargs):
attn_weights_list = []
for transformer in self.blocks:
inputs, attn_weights = transformer(inputs)
attn_weights_list.append(tf.reduce_sum(attn_weights[:, :, 0, :]))
importances = tf.reduce_sum(tf.stack(attn_weights_list), axis=0) / (
self.num_blocks * self.num_heads)
return inputs, importances
class Transformer(tf.keras.layers.Layer):
"""
A basic transformer block consisting of
LayerNormalization -> Add -> MultiheadAttention -> MLP ->
Example
-------
>>> import numpy as np
>>> from tensorflow.keras.models import Model
>>> from tensorflow.keras.layers import Input, Dense
>>> from ai4water.models._tensorflow import Transformer
>>> inp = Input(shape=(10, 32))
>>> out, _ = Transformer(4, 32)(inp)
>>> out = Dense(1)(out)
>>> model = Model(inputs=inp, outputs=out)
>>> model.compile(optimizer="Adam", loss="mse")
>>> x = np.random.random((100, 10, 32))
>>> y = np.random.random(100)
>>> h = model.fit(x,y)
"""
def __init__(
self,
num_heads:int = 4,
embed_dim:int=32,
dropout=0.1,
post_norm:bool = True,
prenorm_mlp:bool = False,
num_dense_lyrs:int = 1,
seed:int = 313,
*args,
**kwargs
):
"""
Parameters
-----------
num_heads : int
number of attention heads
embed_dim : int
embedding dimension. This value is also used for the units/neurons in the MLP block
dropout : float
dropout rate in the MLP block
post_norm : bool (default=True)
whether to apply LayerNormalization on the outputs or not.
prenorm_mlp : bool
whether to apply LayerNormalization on inputs of MLP or not
num_dense_lyrs : int
number of Dense layers in MLP block.
"""
super(Transformer, self).__init__(*args, **kwargs)
self.num_heads = num_heads
self.embed_dim = embed_dim
self.dropout = dropout
self.post_norm = post_norm
self.prenorm_mlp = prenorm_mlp
self.seed = seed
assert num_dense_lyrs <= 2
self.num_dense_lyrs = num_dense_lyrs
self.att = tf.keras.layers.MultiHeadAttention(
num_heads=num_heads, key_dim=embed_dim,
dropout=dropout
)
self.skip1 = tf.keras.layers.Add()
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.ffn = self._make_mlp()
self.skip2 = tf.keras.layers.Add()
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
def _make_mlp(self):
lyrs = []
if self.prenorm_mlp:
lyrs += [tf.keras.layers.LayerNormalization(epsilon=1e-6)]
lyrs += [
Dense(self.embed_dim, activation=tf.keras.activations.gelu),
tf.keras.layers.Dropout(self.dropout, seed=self.seed),
]
if self.num_dense_lyrs>1:
lyrs += [tf.keras.layers.Dense(self.embed_dim)]
return tf.keras.Sequential(lyrs)
def get_config(self)->dict:
config = {
"num_heads": self.num_heads,
"embed_dim": self.embed_dim,
"dropout": self.dropout,
"post_norm": self.post_norm,
"pre_norm_mlp": self.prenorm_mlp,
"seed": self.seed,
"num_dense_lyrs": self.num_dense_lyrs
}
return config
def __call__(self, inputs, *args, **kwargs):
inputs = self.layernorm1(inputs)
attention_output, att_weights = self.att(
inputs, inputs, return_attention_scores=True
)
attention_output = self.skip1([inputs, attention_output])
feedforward_output = self.ffn(attention_output)
outputs = self.skip2([feedforward_output, attention_output])
if self.post_norm:
return self.layernorm2(outputs), att_weights
return outputs, att_weights
class NumericalEmbeddings(layers.Layer):
def __init__(
self,
num_features,
emb_dim,
*args,
**kwargs
):
self.num_features = num_features
self.emb_dim = emb_dim
super(NumericalEmbeddings, self).__init__(*args, **kwargs)
def build(self, input_shape):
w_init = tf.random_normal_initializer()
# features, n_bins, emb_dim
self.linear_w = tf.Variable(
initial_value=w_init(
shape=(self.num_features, 1, self.emb_dim), dtype='float32'
), trainable=True, name="NumEmbeddingWeights")
# features, n_bins, emb_dim
self.linear_b = tf.Variable(
w_init(
shape=(self.num_features, 1), dtype='float32'
), trainable=True, name="NumEmbeddingBias")
return
def get_config(self)->dict:
config = {
"num_features": self.num_features,
"emb_dim": self.emb_dim
}
return config
def call(self, X, *args, **kwargs):
embs = tf.einsum('f n e, b f -> bfe', self.linear_w, X)
embs = tf.nn.relu(embs + self.linear_b)
return embs
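# A minimal usage sketch of NumericalEmbeddings, which maps each numerical
# feature to a learned embedding vector; the sizes below are assumed values.
# >>> import tensorflow as tf
# >>> x = tf.random.normal((8, 5))                             # (batch, num_features)
# >>> embs = NumericalEmbeddings(num_features=5, emb_dim=32)(x)    # -> (8, 5, 32)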
class CatEmbeddings(layers.Layer):
"""
The layer to encode categorical features.
Parameters
-----------
vocabulary : dict
embed_dim : int
dimension of the embedding for each categorical feature
lookup_kws : dict
keyword arguments that will go to StringLookup layer
"""
def __init__(
self,
vocabulary:dict,
embed_dim:int = 32,
lookup_kws:dict = None,
*args,
**kwargs
):
super(CatEmbeddings, self).__init__(*args, **kwargs)
self.vocabulary = vocabulary
self.embed_dim = embed_dim
self.lookup_kws = lookup_kws
self.lookups = {}
self.embedding_lyrs = {}
self.feature_names = []
_lookup_kws = dict(mask_token=None,
num_oov_indices=0,
output_mode="int")
if lookup_kws is not None:
_lookup_kws.update(lookup_kws)
for feature_name, vocab in vocabulary.items():
lookup = layers.StringLookup(
vocabulary=vocab,
**_lookup_kws
)
self.lookups[feature_name] = lookup
embedding = layers.Embedding(
input_dim=len(vocab), output_dim=embed_dim
)
self.embedding_lyrs[feature_name] = embedding
self.feature_names.append(feature_name)
def get_config(self)->dict:
config = {
"lookup_kws": self.lookup_kws,
"embed_dim": self.embed_dim,
"vocabulary": self.vocabulary
}
return config
def call(self, inputs, *args, **kwargs):
"""
The columns of `inputs` must be in the same
order as the keys in the `vocabulary` dictionary.
Parameters
-------------
inputs :
a tensor of categorical features of shape (None, num_cat_features)
Returns
-------
a tensor of shape (None, num_cat_features, embed_dim)
"""
encoded_features = []
for idx, feat_name in enumerate(self.feature_names):
feat_input = inputs[:, idx]
lookup = self.lookups[feat_name]
encoded_feature = lookup(feat_input)
embedding = self.embedding_lyrs[feat_name]
encoded_categorical_feature = embedding(encoded_feature)
encoded_features.append(encoded_categorical_feature)
cat_embeddings = tf.stack(encoded_features, axis=1)
return cat_embeddings
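# A minimal usage sketch of CatEmbeddings; the vocabulary and values below are
# made-up examples, and the input columns must follow the vocabulary order.
# >>> import tensorflow as tf
# >>> vocab = {"soil": ["sandy", "loamy"], "land_use": ["urban", "forest", "crop"]}
# >>> cats = tf.constant([["sandy", "urban"], ["loamy", "crop"]])  # (batch, num_cat_features)
# >>> embs = CatEmbeddings(vocabulary=vocab, embed_dim=16)(cats)   # -> (2, 2, 16)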
class TabTransformer(layers.Layer):
"""
tensorflow/keras layer which implements logic of TabTransformer model.
The TabTransformer layer converts categorical features into contextual embeddings
by passing them into Transformer block. The output of Transformer block is
concatenated with numerical features and passed through an MLP to
get the final model output.
It is available only in tensorflow >= 2.6
"""
def __init__(
self,
num_numeric_features: int,
cat_vocabulary: dict,
hidden_units=32,
lookup_kws:dict=None,
num_heads: int = 4,
depth: int = 4,
dropout: float = 0.1,
num_dense_lyrs: int = 2,
prenorm_mlp: bool = True,
post_norm: bool = True,
final_mlp_units = 16,
final_mpl_activation:str = "selu",
seed: int = 313,
*args, **kwargs
):
"""
Parameters
----------
num_numeric_features : int
number of numeric features to be used as input.
cat_vocabulary : dict
a dictionary whose keys are names of categorical features and values
are lists which consist of unique values of categorical features.
You can use the function :py:meth:`ai4water.models.utils.gen_cat_vocab`
to create this for your own data. The length of the dictionary should be
equal to the number of categorical features. If it is None, then this
layer expects only numeric features
hidden_units : int, optional (default=32)
number of hidden units
num_heads : int, optional (default=4)
number of attention heads
depth : int (default=4)
number of transformer blocks to be stacked on top of each other
dropout : int, optional (default=0.1)
dropout rate in the transformer
post_norm : bool (default=True)
prenorm_mlp : bool (default=True)
num_dense_lyrs : int (default=2)
number of dense layers in MLP block inside the Transformer
final_mlp_units : int (default=16)
number of units/neurons in final MLP layer i.e. the MLP layer
after Transformer block
"""
super(TabTransformer, self).__init__(*args, **kwargs)
self.cat_vocabulary = cat_vocabulary
self.num_numeric_inputs = num_numeric_features
self.hidden_units = hidden_units
self.lookup_kws = lookup_kws
self.num_heads = num_heads
self.depth = depth
self.dropout = dropout
self.final_mlp_units = final_mlp_units
self.final_mpl_activation = final_mpl_activation
self.seed = seed
self.cat_embs = CatEmbeddings(
vocabulary=cat_vocabulary,
embed_dim=hidden_units,
lookup_kws=lookup_kws
)
# layer normalization of numerical features
self.lyr_norm = layers.LayerNormalization(epsilon=1e-6)
self.transformers = TransformerBlocks(
embed_dim=hidden_units,
num_heads=num_heads,
num_blocks=depth,
num_dense_lyrs=num_dense_lyrs,
post_norm=post_norm,
prenorm_mlp=prenorm_mlp,
dropout=dropout,
seed=seed
)
self.flatten = layers.Flatten()
self.concat = layers.Concatenate()
self.mlp = self.create_mlp(
activation=self.final_mpl_activation,
normalization_layer=layers.BatchNormalization(),
name="MLP",
)
# Implement an MLP block
def create_mlp(
self,
activation,
normalization_layer,
name=None
):
if isinstance(self.final_mlp_units, int):
hidden_units = [self.final_mlp_units]
else:
assert isinstance(self.final_mlp_units, list)
hidden_units = self.final_mlp_units
mlp_layers = []
for units in hidden_units:
mlp_layers.append(normalization_layer)
mlp_layers.append(layers.Dense(units, activation=activation))
mlp_layers.append(layers.Dropout(self.dropout, seed=self.seed))
return tf.keras.Sequential(mlp_layers, name=name)
def __call__(self, inputs:list, *args, **kwargs):
"""
inputs :
a list of two tensors. The first tensor holds the numerical inputs and the
second holds the categorical inputs
"""
num_inputs = inputs[0]
cat_inputs = inputs[1]
cat_embs = self.cat_embs(cat_inputs)
transformer_outputs, imp = self.transformers(cat_embs)
flat_transformer_outputs = self.flatten(transformer_outputs)
num_embs = self.lyr_norm(num_inputs)
x = self.concat([num_embs, flat_transformer_outputs])
return self.mlp(x), imp
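# A minimal usage sketch of the TabTransformer layer; the data, vocabulary and
# sizes below are made-up examples.
# >>> import tensorflow as tf
# >>> vocab = {"soil": ["sandy", "loamy"]}
# >>> num_x = tf.random.normal((8, 4))                         # numerical inputs
# >>> cat_x = tf.constant([["sandy"]] * 8)                     # categorical inputs
# >>> out, imp = TabTransformer(num_numeric_features=4, cat_vocabulary=vocab)([num_x, cat_x])
# >>> # out -> (8, 16)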
class FTTransformer(layers.Layer):
"""
tensorflow/keras layer which implements logic of FTTransformer model.
In FTTransformer, both categorical and numerical features are passed
through transformer block and then passed through MLP layer to get
the final model prediction.
"""
def __init__(
self,
num_numeric_features: int,
cat_vocabulary: Union[dict, None] = None,
hidden_units=32,
num_heads: int = 4,
depth: int = 4,
dropout: float = 0.1,
lookup_kws:dict = None,
num_dense_lyrs: int = 2,
post_norm: bool = True,
final_mlp_units: int = 16,
with_cls_token:bool = False,
seed: int = 313,
*args,
**kwargs
):
"""
Parameters
----------
num_numeric_features : int
number of numeric features to be used as input.
cat_vocabulary : dict/None
a dictionary whose keys are names of categorical features and values
are lists which consist of unique values of categorical features.
You can use the function :py:meth:`ai4water.models.utils.gen_cat_vocab`
to create this for your own data. The length of the dictionary should be
equal to the number of categorical features. If it is None, then this
layer expects only numeric features
hidden_units : int, optional (default=32)
number of hidden units
num_heads : int, optional (default=4)
number of attention heads
depth : int (default=4)
number of transformer blocks to be stacked on top of each other
dropout : float, optional (default=0.1)
dropout rate in the transformer
lookup_kws : dict
keyword arguments for lookup layer
post_norm : bool (default=True)
num_dense_lyrs : int (default=2)
number of dense layers in MLP block inside the Transformer
final_mlp_units : int (default=16)
number of units/neurons in final MLP layer i.e. the MLP layer
after Transformer block
with_cls_token : bool (default=False)
whether to use cls token or not
seed : int
seed for reproducibility
"""
super(FTTransformer, self).__init__(*args, **kwargs)
self.cat_vocabulary = cat_vocabulary
self.num_numeric_inputs = num_numeric_features
self.hidden_units = hidden_units
self.num_heads = num_heads
self.depth = depth
self.dropout = dropout
self.final_mlp_units = final_mlp_units
self.with_cls_token = with_cls_token
self.seed = seed
if cat_vocabulary is not None:
self.cat_embs = CatEmbeddings(
vocabulary=cat_vocabulary,
embed_dim=hidden_units,
lookup_kws=lookup_kws
)
self.num_embs = NumericalEmbeddings(
num_features=num_numeric_features,
emb_dim=hidden_units
)
if cat_vocabulary is not None:
self.concat = layers.Concatenate(axis=1)
self.transformers = TransformerBlocks(
embed_dim=hidden_units,
num_heads=num_heads,
num_blocks=depth,
num_dense_lyrs=num_dense_lyrs,
post_norm=post_norm,
dropout=dropout,
seed=seed
)
self.lmbda = tf.keras.layers.Lambda(lambda x: x[:, 0, :])
self.lyr_norm = layers.LayerNormalization(epsilon=1e-6)
self.mlp = layers.Dense(final_mlp_units)
def build(self, input_shape):
if self.with_cls_token:
# CLS token
w_init = tf.random_normal_initializer()
self.cls_weights = tf.Variable(
initial_value=w_init(shape=(1, self.hidden_units), dtype="float32"),
trainable=True,
)
return
def __call__(self, inputs:list, *args, **kwargs):
"""
inputs :
If categorical variables are considered, then inputs is a list of two tensors.
The first tensor holds the numerical inputs and the second holds the categorical inputs.
If categorical variables are not considered, then inputs is just a single
tensor.
"""
if self.cat_vocabulary is None:
if isinstance(inputs, list):
assert len(inputs) == 1
num_inputs = inputs[0]
else:
num_inputs = inputs
else:
assert len(inputs) == 2
num_inputs = inputs[0]
cat_inputs = inputs[1]
# cls_tokens = tf.repeat(self.cls_weights, repeats=tf.shape(inputs[self.numerical[0]])[0], axis=0)
# cls_tokens = tf.expand_dims(cls_tokens, axis=1)
num_embs = self.num_embs(num_inputs)
if self.cat_vocabulary is None:
embs = num_embs
else:
cat_embs = self.cat_embs(cat_inputs)
embs = self.concat([num_embs, cat_embs])
x, imp = self.transformers(embs)
x = self.lmbda(x)
x = self.lyr_norm(x)
return self.mlp(x), imp
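# A minimal usage sketch of the FTTransformer layer with numerical features only;
# the sizes below are made-up examples.
# >>> import tensorflow as tf
# >>> num_x = tf.random.normal((8, 4))                         # (batch, num_numeric_features)
# >>> out, imp = FTTransformer(num_numeric_features=4, cat_vocabulary=None)(num_x)
# >>> # out -> (8, 16)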
class Conditionalize(tf.keras.layers.Layer):
"""Mimics the behaviour of cond_rnn of Philipperemy but puts the logic
of condition in a separate layer so that it becomes easier to use it.
Example
--------
>>> from ai4water.models._tensorflow import Conditionalize
>>> from tensorflow.keras.layers import Input, LSTM
>>> i = Input(shape=(10, 3))
>>> raw_conditions = Input(shape=(14,))
>>> processed_conds = Conditionalize(32)([raw_conditions, raw_conditions, raw_conditions])
>>> rnn = LSTM(32)(i, initial_state=[processed_conds, processed_conds])
This layer can also be used in ai4water model when defining the model
using declarative model definition style
>>> from ai4water import Model
>>> import numpy as np
>>> model = Model(model={"layers": {
... "Input": {"shape": (10, 3)},
... "Input_cat": {"shape": (10,)},
... "Conditionalize": {"config": {"units": 32, "name": "h_state"},
... "inputs": "Input_cat"},
... "LSTM": {"config": {"units": 32},
... "inputs": "Input",
... 'call_args': {'initial_state': ['h_state', 'h_state']}},
... "Dense": {"units": 1}}},
... ts_args={"lookback": 10}, verbosity=0, epochs=1)
... # define the input and call the .fit method
>>> x1 = np.random.random((100, 10, 3))
>>> x2 = np.random.random((100, 10))
>>> y = np.random.random(100)
>>> h = model.fit(x=[x1, x2], y=y)
"""
def __init__(self, units,
max_num_cond=10,
use_bias:bool = True,
**kwargs):
self.units = units
super().__init__(**kwargs)
# single cond
self.cond_to_init_state_dense_1 = tf.keras.layers.Dense(units=self.units,
use_bias=use_bias,
name="conditional_dense")
# multi cond
self.multi_cond_to_init_state_dense = []
for i in range(max_num_cond):
self.multi_cond_to_init_state_dense.append(tf.keras.layers.Dense(
units=self.units,
use_bias=use_bias,
name=f"conditional_dense{i}"))
self.multi_cond_p = tf.keras.layers.Dense(1, activation=None, use_bias=True, name="conditional_dense_out")
@staticmethod
def _standardize_condition(initial_cond):
assert len(initial_cond.shape) == 2, initial_cond.shape
return initial_cond
def __call__(self, inputs, *args, **kwargs):
if args or kwargs:
raise ValueError(f"Unrecognized input arguments\n args: {args} \nkwargs: {kwargs}")
if inputs.__class__.__name__ in ("Tensor", "KerasTensor"):
inputs = [inputs]
assert isinstance(inputs, (list, tuple)) and len(inputs) >= 1, f"{type(inputs)}"
cond = inputs
if len(cond) > 1: # multiple conditions.
init_state_list = []
for idx, c in enumerate(cond):
init_state_list.append(self.multi_cond_to_init_state_dense[idx](self._standardize_condition(c)))
multi_cond_state = tf.stack(init_state_list, axis=-1) # -> (?, units, num_conds)
multi_cond_state = self.multi_cond_p(multi_cond_state) # -> (?, units, 1)
cond_state = tf.squeeze(multi_cond_state, axis=-1) # -> (?, units)
else:
cond = self._standardize_condition(cond[0])
cond_state = self.cond_to_init_state_dense_1(cond) # -> (?, units)
return cond_state
class _NormalizedGate(Layer):
_Normalizers = {
'relu': tf.nn.relu,
'sigmoid': tf.nn.sigmoid
}
def __init__(self, in_features, out_shape, normalizer="relu"):
super(_NormalizedGate, self).__init__()
self.in_features = in_features
self.out_shape = out_shape
self.normalizer = self._Normalizers[normalizer]
self.fc = Dense(out_shape[0]*out_shape[1],
use_bias=True,
kernel_initializer="Orthogonal",
bias_initializer="zeros")
def call(self, inputs):
h = self.fc(inputs)
h = tf.reshape(h, (-1, *self.out_shape))
h = self.normalizer(h)
normalized, _ = tf.linalg.normalize(h, axis=-1)
return normalized
class _MCLSTMCell(Layer):
"""
Examples
--------
m_inp = tf.range(50, dtype=tf.float32)
m_inp = tf.reshape(m_inp, (5, 10, 1))
aux_inp = tf.range(150, dtype=tf.float32)
aux_inp = tf.reshape(aux_inp, (5, 10, 3))
cell = _MCLSTMCell(1, 3, 8)
m_out_, ct_ = cell(m_inp, aux_inp)
"""
def __init__(
self,
mass_input_size,
aux_input_size,
units,
time_major:bool = False,
):
super(_MCLSTMCell, self).__init__()
self.units = units
self.time_major = time_major
gate_inputs = aux_input_size + self.units + mass_input_size
self.output_gate = Dense(self.units,
activation="sigmoid",
kernel_initializer="Orthogonal",
bias_initializer="zeros",
name="sigmoid_gate")
self.input_gate = _NormalizedGate(gate_inputs,
(mass_input_size, self.units),
"sigmoid")
self.redistribution = _NormalizedGate(gate_inputs,
(self.units, self.units),
"relu")
def call(self, x_m, x_a, ct=None):
if not self.time_major:
# (batch_size, lookback, input_features) -> (lookback, batch_size, input_features)
x_m = tf.transpose(x_m, [1, 0, 2])
x_a = tf.transpose(x_a, [1, 0, 2])
lookback_steps, batch_size, _ = x_m.shape
if ct is None:
ct = tf.zeros((batch_size, self.units))
m_out, c = [], []
for time_step in range(lookback_steps):
mt_out, ct = self._step(x_m[time_step], x_a[time_step], ct)
m_out.append(mt_out)
c.append(ct)
m_out, c = tf.stack(m_out), tf.stack(c) # (lookback, batch_size, units)
return m_out, c
def _step(self, xt_m, xt_a, c):
features = tf.concat([xt_m, xt_a, c / (tf.norm(c) + 1e-5)], axis=-1) # (examples, ?)
# compute gate activations
i = self.input_gate(features) # (examples, 1, units)
r = self.redistribution(features) # (examples, units, units)
o = self.output_gate(features) # (examples, units)
m_in = tf.squeeze(tf.matmul(tf.expand_dims(xt_m, axis=-2), i), axis=-2)
m_sys = tf.squeeze(tf.matmul(tf.expand_dims(c, axis=-2), r), axis=-2)
m_new = m_in + m_sys
return tf.multiply(o, m_new), tf.multiply(tf.subtract(1.0, o), m_new)
class MCLSTM(Layer):
"""Mass-Conserving LSTM model from Hoedt et al. [1]_.
This implementation follows NeuralHydrology's implementation of MCLSTM
with some changes:
1) the reduced sum over the units is not performed
2) time_major argument is added
3) no implementation of Embedding
Examples
--------
>>> from ai4water.models._tensorflow import MCLSTM
>>> import tensorflow as tf
>>> inputs = tf.range(150, dtype=tf.float32)
>>> inputs = tf.reshape(inputs, (10, 5, 3))
>>> mc = MCLSTM(1, 2, 8, 1)
>>> h = mc(inputs) # (batch, units)
...
>>> mc = MCLSTM(1, 2, 8, 1, return_sequences=True)
>>> h = mc(inputs) # (batch, lookback, units)
...
>>> mc = MCLSTM(1, 2, 8, 1, return_state=True)
>>> _h, _o, _c = mc(inputs) # (batch, lookback, units)
...
>>> mc = MCLSTM(1, 2, 8, 1, return_state=True, return_sequences=True)
>>> _h, _o, _c = mc(inputs) # (batch, lookback, units)
...
... # with time_major as True
>>> inputs = tf.range(150, dtype=tf.float32)
>>> inputs = tf.reshape(inputs, (5, 10, 3))
>>> mc = MCLSTM(1, 2, 8, 1, time_major=True)
>>> _h = mc(inputs) # (batch, units)
...
>>> mc = MCLSTM(1, 2, 8, 1, time_major=True, return_sequences=True)
>>> _h = mc(inputs) # (lookback, batch, units)
...
>>> mc = MCLSTM(1, 2, 8, 1, time_major=True, return_state=True)
>>> _h, _o, _c = mc(inputs) # (batch, units), ..., (lookback, batch, units)
...
... # end to end keras Model
>>> from tensorflow.keras.layers import Dense, Input
>>> from tensorflow.keras.models import Model
>>> import numpy as np
...
>>> inp = Input(batch_shape=(32, 10, 3))
>>> lstm = MCLSTM(1, 2, 8)(inp)
>>> out = Dense(1)(lstm)
...
>>> model = Model(inputs=inp, outputs=out)
>>> model.compile(loss='mse')
...
>>> x = np.random.random((320, 10, 3))
>>> y = np.random.random((320, 1))
>>> y = model.fit(x=x, y=y)
References
----------
.. [1] https://arxiv.org/abs/2101.05186
"""
def __init__(
self,
num_mass_inputs,
dynamic_inputs,
units,
num_targets=1,
time_major:bool = False,
return_sequences:bool = False,
return_state:bool = False,
name="MCLSTM",
**kwargs
):
"""
Parameters
----------
num_mass_inputs : int
number of mass inputs i.e. inputs for which mass balance is to be conserved
dynamic_inputs :
number of inputs other than mass inputs
units :
hidden size, determines the size of weight matrix
num_targets : int
number of target variables
time_major : bool, optional (default=False)
if True, the data is expected to be of shape (lookback, batch_size, input_features)
otherwise, data is expected to be of shape (batch_size, lookback, input_features)
"""
super(MCLSTM, self).__init__(name=name, **kwargs)
assert num_mass_inputs ==1
assert units>1
assert num_targets==1
self.n_mass_inputs = num_mass_inputs
self.units = units
self.n_aux_inputs = dynamic_inputs
self.time_major = time_major
self.return_sequences = return_sequences
self.return_state = return_state
self.mclstm = _MCLSTMCell(
self.n_mass_inputs,
self.n_aux_inputs,
self.units,
self.time_major,
)
def call(self, inputs):
x_m = inputs[:, :, :self.n_mass_inputs] # (batch, lookback, 1)
x_a = inputs[:, :, self.n_mass_inputs:] # (batch, lookback, dynamic_inputs)
output, c = self.mclstm(x_m, x_a) # (lookback, batch, units)
# unlike NeuralHydrology, we don't perform the reduced sum over the units
# in order to keep with the convention in keras/lstm
#output = tf.math.reduce_sum(output[:, :, 1:], axis=-1, keepdims=True)
if self.time_major:
h, m_out, c = output, output, c
if not self.return_sequences:
h = h[-1]
else:
h = tf.transpose(output, [1, 0, 2]) # -> (batch_size, lookback, 1)
#m_out = tf.transpose(output, [1, 0, 2]) # -> (batch_size, lookback, 1)
c = tf.transpose(c, [1, 0, 2]) # -> (batch_size, lookback, units)
if not self.return_sequences:
h = h[:, -1]
if self.return_state:
return h, h, c
return h
class EALSTM(Layer):
"""Entity Aware LSTM as proposed by Kratzert et al., 2019 [1]_
The difference here is that a Dense layer is not applied on the cell state as done in
the original implementation in NeuralHydrology [2]_. This is left to the user's discretion.
Examples
--------
>>> from ai4water.models._tensorflow import EALSTM
>>> import tensorflow as tf
>>> batch_size, lookback, num_dyn_inputs, num_static_inputs, units = 10, 5, 3, 2, 8
>>> inputs = tf.range(batch_size*lookback*num_dyn_inputs, dtype=tf.float32)
>>> inputs = tf.reshape(inputs, (batch_size, lookback, num_dyn_inputs))
>>> stat_inputs = tf.range(batch_size*num_static_inputs, dtype=tf.float32)
>>> stat_inputs = tf.reshape(stat_inputs, (batch_size, num_static_inputs))
>>> lstm = EALSTM(units, num_static_inputs)
>>> h_n = lstm(inputs, stat_inputs) # -> (batch_size, units)
...
... # with return sequences
>>> lstm = EALSTM(units, num_static_inputs, return_sequences=True)
>>> h_n = lstm(inputs, stat_inputs) # -> (batch, lookback, units)
...
... # with return sequences and return_state
>>> lstm = EALSTM(units, num_static_inputs, return_sequences=True, return_state=True)
>>> h_n, [c_n, y_hat] = lstm(inputs, stat_inputs) # -> (batch, lookback, units), [(), ()]
...
... # end to end Keras model
>>> from tensorflow.keras.models import Model
>>> from tensorflow.keras.layers import Input, Dense
>>> import numpy as np
>>> inp_dyn = Input(batch_shape=(batch_size, lookback, num_dyn_inputs))
>>> inp_static = Input(batch_shape=(batch_size, num_static_inputs))
>>> lstm = EALSTM(units, num_static_inputs)(inp_dyn, inp_static)
>>> out = Dense(1)(lstm)
>>> model = Model(inputs=[inp_dyn, inp_static], outputs=out)
>>> model.compile(loss='mse')
>>> print(model.summary())
... # generate hypothetical data and train it
>>> dyn_x = np.random.random((100, lookback, num_dyn_inputs))
>>> static_x = np.random.random((100, num_static_inputs))
>>> y = np.random.random((100, 1))
>>> h = model.fit(x=[dyn_x, static_x], y=y, batch_size=batch_size)
References
----------
.. [1] https://doi.org/10.5194/hess-23-5089-2019
.. [2] https://github.com/neuralhydrology/neuralhydrology
"""
def __init__(
self,
units:int,
num_static_inputs:int,
use_bias:bool=True,
activation = "tanh",
recurrent_activation="sigmoid",
static_activation="sigmoid",
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
static_initializer = "glorot_uniform",
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
static_constraint=None,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
static_regularizer=None,
return_state=False,
return_sequences=False,
time_major=False,
**kwargs
):
"""
Parameters
----------
units : int
number of units
num_static_inputs : int
number of static features
static_activation :
activation function for the static input gate
static_regularizer :
regularizer for the kernel of the static input gate
static_constraint :
constraint for the kernel of the static input gate
static_initializer :
initializer for the kernel of the static input gate
"""
super(EALSTM, self).__init__(**kwargs)
self.units = units
self.num_static_inputs = num_static_inputs
self.activation = activations.get(activation)
self.rec_activation = activations.get(recurrent_activation)
self.static_activation = static_activation
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.static_initializer = initializers.get(static_initializer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.static_constraint = static_constraint
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.static_regularizer = static_regularizer
self.return_state = return_state
self.return_sequences = return_sequences
self.time_major=time_major
self.input_gate = Dense(units,
use_bias=self.use_bias,
kernel_initializer=self.static_initializer,
bias_initializer=self.bias_initializer,
activation=self.static_activation,
kernel_constraint=self.static_constraint,
bias_constraint=self.bias_constraint,
kernel_regularizer=self.static_regularizer,
bias_regularizer=self.bias_regularizer,
name="input_gate")
def call(self, inputs, static_inputs, initial_state=None, **kwargs):
"""
static_inputs :
of shape (batch, num_static_inputs)
"""
if not self.time_major:
inputs = tf.transpose(inputs, [1, 0, 2])
lookback, batch_size, _ = inputs.shape
if initial_state is None:
initial_state = tf.zeros((batch_size, self.units)) # todo
state = [initial_state, initial_state]
else:
state = initial_state
# calculate input gate only once because inputs are static
inp_g = self.input_gate(static_inputs) # (batch, num_static_inputs) -> (batch, units)
outputs, states = [], []
for time_step in range(lookback):
_out, state = self.cell(inputs[time_step], inp_g, state)
outputs.append(_out)
states.append(state)
outputs = tf.stack(outputs)
h_s = tf.stack([states[i][0] for i in range(lookback)])
c_s = tf.stack([states[i][1] for i in range(lookback)])
if not self.time_major:
outputs = tf.transpose(outputs, [1, 0, 2])
h_s = tf.transpose(h_s, [1, 0, 2])
c_s = tf.transpose(c_s, [1, 0, 2])
states = [h_s, c_s]
last_output = outputs[:, -1]
else:
states = [h_s, c_s]
last_output = outputs[-1]
h = last_output
if self.return_sequences:
h = outputs
if self.return_state:
return h, states
return h
def cell(self, inputs, i, states):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
k_f, k_c, k_o = array_ops.split(self.kernel, num_or_size_splits=3, axis=1)
x_f = K.dot(inputs, k_f)
x_c = K.dot(inputs, k_c)
x_o = K.dot(inputs, k_o)
if self.use_bias:
b_f, b_c, b_o = array_ops.split(
self.bias, num_or_size_splits=3, axis=0)
x_f = K.bias_add(x_f, b_f)
x_c = K.bias_add(x_c, b_c)
x_o = K.bias_add(x_o, b_o)
# forget gate
f = self.rec_activation(x_f + K.dot(h_tm1, self.rec_kernel[:, :self.units]))
# cell state
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1, self.rec_kernel[:, self.units:self.units * 2]))
# output gate
o = self.rec_activation(x_o + K.dot(h_tm1, self.rec_kernel[:, self.units * 2:]))
h = o * self.activation(c)
return h, [h, c]
def build(self, input_shape):
"""
kernel, recurrent_kernel and bias are initialized for 3 gates instead
of 4 gates as in the original LSTM
"""
input_dim = input_shape[-1]
self.bias = self.add_weight(
shape=(self.units * 3,),
name='bias',
initializer=self.bias_initializer,
constraint=self.bias_constraint,
regularizer=self.bias_regularizer
)
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
constraint=self.kernel_constraint,
regularizer=self.kernel_regularizer
)
self.rec_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
constraint=self.recurrent_constraint,
regularizer=self.recurrent_regularizer
)
self.built = True
return
class PrivateLayers(object):
class layers:
BasicBlock = BasicBlock
CONDRNN = ConditionalRNN
Conditionalize = Conditionalize
MCLSTM = MCLSTM
EALSTM = EALSTM
CatEmbeddings = CatEmbeddings
TransformerBlocks = TransformerBlocks
NumericalEmbeddings = NumericalEmbeddings
TabTransformer = TabTransformer
FTTransformer = FTTransformer | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_tensorflow/private_layers.py | private_layers.py |
__all__ = ['TemporalFusionTransformer']
"""
This file contains most of the code from
https://github.com/google-research/google-research/blob/master/tft/libs/tft_model.py
The TemporalFusionTransformer class has been modified so that it can be used as a regular layer and without static,
categorical, observation and future inputs. It has also been modified to use a 1D CNN as
an alternative to LSTM.
The original code is licensed under the Apache License, Version 2.0, although a lot of
the code has been modified here. http://www.apache.org/licenses/LICENSE-2.0
"""
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import TimeDistributed, Dense, InputLayer, Embedding, Conv1D, Flatten
from .utils import concatenate, gated_residual_network, add_and_norm, apply_gating_layer, get_decoder_mask
from .utils import InterpretableMultiHeadAttention
# Layer definitions.
stack = tf.keras.backend.stack
class TemporalFusionTransformer(tf.keras.layers.Layer):
"""
Implements the model of https://arxiv.org/pdf/1912.09363.pdf
This layer applies variable selection three times. First on static inputs,
then on encoder inputs and then on decoder inputs. The corresponding weights
are called `static_weights`, `historical_weights` and `future_weights` respectively.
For example, with four continuous input columns followed by one categorical column
(categorical values shown as a, b, c, d), the raw input rows would look like:
1, 11, 21, 31, a
2, 12, 22, 32, b
3, 13, 23, 33, c
4, 14, 24, 34, d
Parameters
---------
hidden_units : int
determines the depth/weight matrices size in TemporalFusionTransformer.
num_encoder_steps : int
lookback steps used in the model.
num_heads : int
must be>=1, number of attention heads to be used in MultiheadAttention layer.
num_inputs : int
number of input features
total_time_steps : int
greater than num_encoder_steps, This is sum of lookback steps + forecast length.
Forecast length is the number of horizons to be predicted.
known_categorical_inputs : list
indices of categorical input features which are known in advance
known_regular_inputs : list
indices of continuous/regular input features which are known in advance
input_obs_loc : list
indices of target observations in the input features, if the targets are also
to be used as (past) inputs; leave empty if not applicable
static_input_loc : None/list
location of static inputs; leave empty if there are no static inputs
category_counts : list
Number of categories per categorical variable
use_cnn : bool
whether to use CNN or not. If False, an LSTM will be used; otherwise a
1D CNN with "causal" padding will be used.
kernel_size : int
kernel size for 1D CNN. Only valid if use_cnn is True.
use_cudnn : bool
default False, Whether to use Keras CuDNNLSTM or standard LSTM layers
dropout_rate : float
default 0.1, >=0 and <=1 amount of dropout to be used at GRNs.
future_inputs : bool
whether the given data contains future known observations or not.
return_attention_components : bool
If True, then this layer (upon its call) will return outputs + attention
components. The attention components are a dictionary consisting of the following keys
and their values as numpy arrays.
return_sequences : bool
if True, then output and attention weights will consist of encoder_lengths/lookback
and decoder_length/forecast_len. Otherwise predictions for only decoder_length will be
returned.
Example
-------
>>> params = {'num_inputs': 3, 'total_time_steps': 192, 'num_encoder_steps': 192,
>>>           'known_regular_inputs': [0, 1, 2], 'known_categorical_inputs': [],
>>>           'input_obs_loc': [], 'static_input_loc': [], 'category_counts': [],
>>>           'hidden_units': 8, 'num_heads': 2, 'return_sequences': True}
>>> output_size = 1
>>> quantiles = [0.25, 0.5, 0.75]
>>> layers = {
>>> "Input": {"config": {"shape": (params['total_time_steps'], params['num_inputs']), 'name': "Model_Input"}},
>>> "TemporalFusionTransformer": {"config": params},
>>> "lambda": {"config": tf.keras.layers.Lambda(lambda _x: _x[Ellipsis, -1, :])},
>>> "Dense": {"config": {"units": output_size * len(quantiles)}},
>>> 'Reshape': {'target_shape': (3, 1)}}
"""
def __init__(
self,
hidden_units: int,
num_encoder_steps: int,
num_heads: int,
num_inputs: int,
total_time_steps: int,
known_categorical_inputs,
static_input_loc,
category_counts,
known_regular_inputs,
input_obs_loc,
use_cnn: bool = False,
kernel_size: int = None,
# stack_size:int = 1,
use_cudnn: bool = False,
dropout_rate: float = 0.1,
future_inputs: bool = False,
return_attention_components: bool = False,
return_sequences: bool = False,
**kwargs
):
if use_cnn:
assert kernel_size is not None
self.time_steps = total_time_steps
self.input_size = num_inputs
self.use_cnn = use_cnn
self.kernel_size = kernel_size
self._known_regular_input_idx = known_regular_inputs # [1,2,3]
self._input_obs_loc = input_obs_loc # [0]
self._static_input_loc = static_input_loc # [3,4]
self.category_counts = category_counts # [2, 2]
self._known_categorical_input_idx = known_categorical_inputs
# Network params
self.use_cudnn = use_cudnn # Whether to use GPU optimised LSTM
self.hidden_units = int(hidden_units)
self.dropout_rate = float(dropout_rate)
self.encoder_steps = num_encoder_steps # historical steps/lookback steps
self.num_heads = int(num_heads)
# self.num_stacks= int(stack_size) # todo
self.future_inputs = future_inputs
self.return_attention_components = return_attention_components
self.return_sequences = return_sequences
super().__init__(**kwargs)
def __call__(self, alle_inputs, *args, **kwargs):
"""Returns graph defining layers of the TFT.
"""
# Size definitions.
encoder_steps = self.encoder_steps # encoder_steps
unknown_inputs, known_combined_layer, obs_inputs, static_inputs = self.get_tft_embeddings(alle_inputs)
# known_combined_layer.shape = (num_examples, time_steps, hidden_units, num_outputs)
# obs_inputs.shape = (num_examples, time_steps, hidden_units, 1)
# static_inputs.shape = (num_examples, num_cat_variables, hidden_units)
# Isolate known and observed historical inputs.
if unknown_inputs is not None:
historical_inputs = concatenate([
unknown_inputs[:, :encoder_steps, :],
known_combined_layer[:, :encoder_steps, :],
obs_inputs[:, :encoder_steps, :]
], axis=-1, name="historical_inputs")
else:
if obs_inputs is not None:
# we are extracting only historical data i.e. lookback from obs_inputs
# (num_examples, encoder_steps, hidden_units, 4) <- (num_examples, encoder_steps, hidden_units, num_outputs) | (num_examples, encoder_steps, hidden_units, 1)
historical_inputs = concatenate([
known_combined_layer[:, :encoder_steps, :],
obs_inputs[:, :encoder_steps, :]], axis=-1, name="historical_inputs")
else:
historical_inputs = known_combined_layer[:, :encoder_steps, :]
if self.future_inputs:
assert self.time_steps - self.encoder_steps > 0
# Isolate only known future inputs.
future_inputs = known_combined_layer[:, encoder_steps:, :] # (num_examples, 24, hidden_units, num_outputs)
else:
assert self.time_steps == self.encoder_steps
future_inputs = None
def static_combine_and_mask(embedding):
"""Applies variable selection network to static inputs.
Args:
embedding: Transformed static inputs (num_examples, num_cat_variables, hidden_units)
Returns:
Tensor output for variable selection network
"""
# Add temporal features
_, num_static, _ = embedding.get_shape().as_list()
flatten = tf.keras.layers.Flatten()(embedding) # (num_examples, hidden_units*num_cat_variables)
# Nonlinear transformation with gated residual network.
mlp_outputs = gated_residual_network( # (num_examples, num_cat_variables)
flatten,
self.hidden_units,
output_size=num_static,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
additional_context=None,
name='GRN_static'
)
# (num_examples, num_cat_variables)
sparse_weights = tf.keras.layers.Activation('softmax', name='sparse_static_weights')(mlp_outputs)
sparse_weights = K.expand_dims(sparse_weights, axis=-1) # (num_examples, num_cat_variables, 1)
trans_emb_list = []
for i in range(num_static):
e = gated_residual_network( # e.shape = (num_examples, 1, hidden_units)
embedding[:, i:i + 1, :],
self.hidden_units,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
name=f'GRN_static_{i}'
)
trans_emb_list.append(e)
# (num_examples, num_cat_variables, hidden_units)
transformed_embedding = concatenate(trans_emb_list, axis=1, name="transfomred_embedds")
# (num_examples, num_cat_variables, hidden_units)
combined = tf.keras.layers.Multiply(name="StaticWStaticEmb")(
[sparse_weights, transformed_embedding])
static_vec = K.sum(combined, axis=1) # (num_examples, hidden_units)
return static_vec, sparse_weights
static_context_state_h = None
static_context_state_c = None
static_context_variable_selection = None
static_context_enrichment = None
static_weights = None
if static_inputs is not None:
static_encoder, static_weights = static_combine_and_mask(static_inputs)
# static_encoder.shape = (num_examples, hidden_units)
# static_weights.shape = (num_examples, num_cat_variables, 1)
static_context_variable_selection = gated_residual_network( # (num_examples, hidden_units)
static_encoder,
self.hidden_units,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
name="GNR_st_cntxt_var_select"
)
static_context_enrichment = gated_residual_network( # (num_examples, hidden_units)
static_encoder,
self.hidden_units,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
name="GRN_st_cntxt_enrich"
)
static_context_state_h = gated_residual_network( # (num_examples, hidden_units)
static_encoder,
self.hidden_units,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
name="GRN_st_cntxt_h"
)
static_context_state_c = gated_residual_network( # (num_examples, hidden_units)
static_encoder,
self.hidden_units,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
name="GRN_st_cntxt_c"
)
def lstm_combine_and_mask(embedding, static_context, _name=None):
"""Apply temporal variable selection networks.
Args:
embedding: Transformed inputs. (num_examples, time_steps, hidden_units, num_inputs)
# time_steps can be either encoder_steps or decoder_steps.
# num_inputs can be either encoder_inputs or decoder_inputs
static_context:
_name: name of encompassing layers
Returns:
Processed tensor outputs. (num_examples, time_steps, hidden_units)
"""
# Add temporal features
_, time_steps, embedding_dim, num_inputs = embedding.get_shape().as_list()
flatten = K.reshape(embedding, # (num_examples, time_steps, num_inputs*hidden_units)
[-1, time_steps, embedding_dim * num_inputs])
if static_context is not None:
_expanded_static_context = K.expand_dims( # (num_examples, 1, hidden_units)
static_context, axis=1)
else:
_expanded_static_context = None
# Variable selection weights
mlp_outputs, static_gate = gated_residual_network(
flatten,
self.hidden_units,
output_size=num_inputs,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
additional_context=_expanded_static_context,
return_gate=True,
name=f'GRN_with_{_name}'
)
# mlp_outputs.shape (num_examples, time_steps, num_inputs)
# static_gate.shape (num_examples, time_steps, num_inputs)
# sparse_weights (num_examples, time_steps, num_inputs)
sparse_weights = tf.keras.layers.Activation('softmax', # --> (num_examples, time_steps, num_inputs)
name=f'sparse_{_name}_weights_softmax')(mlp_outputs)
sparse_weights = tf.expand_dims(sparse_weights, axis=2) # (num_examples, time_steps, 1, num_inputs)
# Non-linear Processing & weight application
trans_emb_list = []
for i in range(num_inputs):
grn_output = gated_residual_network( # --> (num_examples, time_steps, hidden_units)
embedding[Ellipsis, i], # feeding (num_examples, time_steps, hidden_units) as input
self.hidden_units,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
name=f'GRN_with_{_name}_for_{i}'
)
trans_emb_list.append(grn_output)
# (num_examples, time_steps, hidden_units, num_inputs)
transformed_embedding = stack(trans_emb_list, axis=-1)
# --> (num_examples, time_steps, hidden_units, num_inputs)
combined = tf.keras.layers.Multiply(name=f'sparse_and_transform_{_name}')(
[sparse_weights, transformed_embedding])
temporal_ctx = K.sum(combined, axis=-1) # (num_examples, time_steps, hidden_units)
return temporal_ctx, sparse_weights, static_gate
historical_features, historical_weights, historical_gate = lstm_combine_and_mask(
historical_inputs,
static_context_variable_selection,
_name='history'
)
# historical_features.shape = (num_examples, encoder_steps, hidden_units)
# historical_flags (num_examples, encoder_steps, 1, 4)
future_features = None
future_weights = None
if future_inputs is not None:
future_features, future_weights, future_gate = lstm_combine_and_mask(
future_inputs,
static_context_variable_selection,
_name='future')
# future_features = (num_examples, decoder_length, hidden_units)
# future_flags = (num_examples, decoder_length, 1, num_outputs)
initial_states = None
if static_context_state_h is not None:
initial_states = [static_context_state_h, static_context_state_c]
if self.use_cnn:
history_lstm = build_cnn(historical_features,
self.hidden_units,
self.kernel_size,
return_state=True,
_name="history",
use_cudnn=self.use_cudnn)
else:
lstm = get_lstm(self.hidden_units, return_state=True, _name="history", use_cudnn=self.use_cudnn)
history_lstm, state_h, state_c = lstm(historical_features, initial_state=initial_states)
# history_lstm = (num_examples, encoder_steps, hidden_units)
if future_features is not None:
if self.use_cnn:
future_lstm = build_cnn(future_features, self.hidden_units, self.kernel_size,
return_state=False, _name="Future", use_cudnn=self.use_cudnn)
else:
lstm = get_lstm(self.hidden_units, return_state=False, _name="Future", use_cudnn=self.use_cudnn)
future_lstm = lstm(future_features, initial_state=[state_h, state_c])
# future_lstm = (num_examples, decoder_length, hidden_units)
lstm_output = concatenate([history_lstm, future_lstm],
axis=1,
name='history_plus_future_lstm') # (num_examples, time_steps, hidden_units)
# Apply gated skip connection
input_embeddings = concatenate([historical_features, future_features],
axis=1,
name="history_plus_future_embeddings"
) # (num_examples, time_steps, hidden_units)
else:
lstm_output = history_lstm
input_embeddings = historical_features
lstm_output, _ = apply_gating_layer( # (num_examples, time_steps, hidden_units)
lstm_output, self.hidden_units, self.dropout_rate, activation=None, name='GatingOnLSTM')
# (num_examples, time_steps, hidden_units)
temporal_feature_layer = add_and_norm([lstm_output, input_embeddings], name='AfterLSTM')
# Static enrichment layers
expanded_static_context = None
if static_context_enrichment is not None:
# (num_examples, 1, hidden_units)
expanded_static_context = K.expand_dims(static_context_enrichment, axis=1)
atten_input, _ = gated_residual_network( # (num_examples, time_steps, hidden_units)
temporal_feature_layer,
self.hidden_units,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
additional_context=expanded_static_context,
return_gate=True,
name='GRN_temp_feature'
)
# Decoder self attention
self_attn_layer = InterpretableMultiHeadAttention(
self.num_heads, self.hidden_units, dropout=self.dropout_rate, name="InterpMultiHeadAtten")
mask = get_decoder_mask(atten_input)
# in some implementation cases, queries contain only decoder_length part but since official google repo
# used all the inputs i.e. encoder+decoder part, we are doing so as well.
# This is more useful in cases since transformer layer can be used as many to many.
# Thus current behaviour is similar to `return_sequences=True` of LSTM.
if self.return_sequences:
queries = atten_input
else:
queries = atten_input[:, self.encoder_steps:]
# queries (batch_size, time_steps, hidden_units
# atten_input (batch_size, time_steps, hidden_units
atten_output, self_att = self_attn_layer(queries,
atten_input, atten_input, mask=mask)
# atten_output (batch_size, time_steps, hidden_units)
# self_att (num_heads, batch_size, time_steps, time_steps
# x = (num_examples, time_steps, hidden_units)
atten_output, _ = apply_gating_layer( # # x = (num_examples, time_steps, hidden_units)
atten_output,
self.hidden_units,
dropout_rate=self.dropout_rate,
activation=None,
name="GatingOnX"
)
# # x = (num_examples, time_steps, hidden_units)
atten_output = add_and_norm([atten_output, queries], name="XAndEnriched")
# Nonlinear processing on outputs
decoder = gated_residual_network( # # x = (num_examples, time_steps, hidden_units)
atten_output,
self.hidden_units,
dropout_rate=self.dropout_rate,
use_time_distributed=False,
name="NonLinearityOnOut"
)
# Final skip connection
decoder, _ = apply_gating_layer(decoder, self.hidden_units, activation=None,
name="FinalSkip") # # x = (num_examples, time_steps, hidden_units)
# (num_examples, time_steps, hidden_units)
transformer_output = add_and_norm([decoder, temporal_feature_layer], name="DecoderAndTempFeature")
# Attention components for explainability
attention_components = {
# Temporal attention weights
'decoder_self_attn': self_att, # (num_atten_heads, num_examples, time_steps, time_steps)
# Static variable selection weights # (num_examples, 1)
'static_variable_selection_weights': static_weights[Ellipsis, 0] if static_weights is not None else None,
# Variable selection weights of past inputs # (num_examples, encoder_steps, input_features)
'encoder_variable_selection_weights': historical_weights[Ellipsis, 0, :],
# Variable selection weights of future inputs
# (num_examples, decoder_steps, input_features)
'decoder_variable_selection_weights': future_weights[Ellipsis, 0, :] if future_weights is not None else None
}
self.attention_components = attention_components
if self.return_attention_components:
return transformer_output, attention_components
return transformer_output
def get_tft_embeddings(self, all_inputs):
"""Transforms raw inputs to embeddings.
Applies linear transformation onto continuous variables and uses embeddings
for categorical variables.
Args:
all_inputs: Inputs to transform [batch_size, time_steps, input_features]
where time_steps include both lookback and forecast. The input_features dimension of all_inputs can
contain the following inputs: `static_inputs`, `obs_inputs`, `categorical_inputs`, `regular_inputs`.
Returns:
Tensors for transformed inputs.
unknown_inputs:
known_combined_layer: contains regular inputs and categorical inputs (all known)
obs_inputs: target values to be used as inputs.
static_inputs
"""
time_steps = self.time_steps
# Sanity checks
for i in self._known_regular_input_idx:
if i in self._input_obs_loc:
raise ValueError('Observation cannot be known a priori!')
if self._input_obs_loc is not None:
for i in self._input_obs_loc:
if i in self._static_input_loc:
raise ValueError('Observation cannot be static!')
if all_inputs.get_shape().as_list()[-1] != self.input_size:
raise ValueError(
'Illegal number of inputs! Inputs observed={}, expected={}'.format(
all_inputs.get_shape().as_list()[-1], self.input_size))
num_categorical_variables = len(self.category_counts) # 1
num_regular_variables = self.input_size - num_categorical_variables # 4
embeddings = []
if num_categorical_variables > 0:
embedding_sizes = [ # [hidden_units]
self.hidden_units for i, size in enumerate(self.category_counts)
]
for i in range(num_categorical_variables):
embedding = tf.keras.Sequential([InputLayer([time_steps]),Embedding(
self.category_counts[i],
embedding_sizes[i],
input_length=time_steps,
dtype=tf.float32)
])
embeddings.append(embedding)
# regular_inputs
regular_inputs, categorical_inputs = all_inputs[:, :, :num_regular_variables], \
all_inputs[:, :, num_regular_variables:]
# regular_inputs (num_examples, time_steps, 4)
# categorical_inputs (num_examples, time_steps, num_static_inputs)
# list of length=(num_static_inputs) with shape (num_examples, time_steps, hidden_units)
embedded_inputs = [
embeddings[i](categorical_inputs[Ellipsis, i])
for i in range(num_categorical_variables)
]
else:
embedded_inputs = []
# --> (num_examples, total_time_steps, num_inputs)
regular_inputs = all_inputs[:, :, :num_regular_variables]
categorical_inputs = None
# Static inputs
if len(self._static_input_loc) > 0: # [4]
static_inputs = [tf.keras.layers.Dense(self.hidden_units, name=f'StaticInputs{i}')(
regular_inputs[:, 0, i:i + 1]) for i in range(num_regular_variables)
if i in self._static_input_loc] + [embedded_inputs[i][:, 0, :]
for i in range(num_categorical_variables)
if i + num_regular_variables in self._static_input_loc]
# (num_examples, num_cat_variables, hidden_units) <- [(num_examples, hidden_units)]
static_inputs = stack(static_inputs, axis=1)
else: # there are not static inputs
static_inputs = None
def convert_real_to_embedding(_x, _name=None):
"""Applies linear transformation for time-varying inputs."""
return TimeDistributed(Dense(self.hidden_units, name=_name))(_x)
# whether we have and want to use target observations as inputs or not?
if len(self._input_obs_loc) > 0:
# Targets
obs_inputs = stack([ # (num_examples, time_steps, hidden_units, 1)
convert_real_to_embedding(regular_inputs[Ellipsis, i:i + 1], _name='InputObsDense')
for i in self._input_obs_loc], axis=-1)
else:
obs_inputs = None
wired_embeddings = []
if num_categorical_variables > 0:
# Observed (a priori unknown) inputs
for i in range(num_categorical_variables):
if i not in self._known_categorical_input_idx \
and i not in self._input_obs_loc:
e = embeddings[i](categorical_inputs[:, :, i])
wired_embeddings.append(e)
unknown_inputs = []
for i in range(regular_inputs.shape[-1]):
if i not in self._known_regular_input_idx \
and i not in self._input_obs_loc:
e = convert_real_to_embedding(regular_inputs[Ellipsis, i:i + 1], _name=f'RegularInputsDense{i}')
unknown_inputs.append(e)
if unknown_inputs + wired_embeddings:
if len(wired_embeddings) > 0:
unknown_inputs = stack(unknown_inputs + wired_embeddings, axis=-1)
else:
unknown_inputs = None
# A priori known inputs
known_regular_inputs = [ # list of tensors all of shape (num_examples, total_time_steps, hidden_units)
# feeding (num_examples, total_time_steps, 1) at each loop
convert_real_to_embedding(regular_inputs[Ellipsis, i:i + 1], _name=f'KnownRegularInputs')
for i in self._known_regular_input_idx
if i not in self._static_input_loc
]
known_categorical_inputs = [ # []
embedded_inputs[i]
for i in self._known_categorical_input_idx
if i + num_regular_variables not in self._static_input_loc
]
# (num_examples, time_steps, hidden_units, num_outputs)
known_combined_layer = stack(known_regular_inputs + known_categorical_inputs, axis=-1)
return unknown_inputs, known_combined_layer, obs_inputs, static_inputs
def build_cnn(inputs, filters, kernel_size, return_state, _name, use_cudnn=True):
cnn = Conv1D(filters=filters, kernel_size=kernel_size, padding="causal", name=_name)
cnn_output = cnn(inputs)
return cnn_output
# LSTM layer
def get_lstm(hidden_units, return_state, _name=None, use_cudnn=True):
"""Returns LSTM cell initialized with default parameters."""
if use_cudnn:
lstm = tf.keras.layers.CuDNNLSTM(
hidden_units,
return_sequences=True,
return_state=return_state,
stateful=False,
name=_name
)
else:
lstm = tf.keras.layers.LSTM(
hidden_units,
return_sequences=True,
return_state=return_state,
stateful=False,
# Additional params to ensure LSTM matches CuDNN, See TF 2.0 :
# (https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM)
activation='tanh',
recurrent_activation='sigmoid',
recurrent_dropout=0,
unroll=False,
use_bias=True,
name=_name
)
return lstm | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_tensorflow/tft_layer.py | tft_layer.py |
from typing import Union, List
from ai4water.backend import tf
from ..utils import _make_output_layer, _check_length
def MLP(
units: Union[int, list] = 32,
num_layers: int = 1,
input_shape: tuple = None,
num_outputs: int = 1,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
mode: str = "regression",
output_activation: str = None,
**kwargs
):
"""makes layers for MLP model for tensorflow"""
assert num_layers>=1
units = _check_length(units, num_layers)
dropout = _check_length(dropout, num_layers)
activation = _check_length(activation, num_layers)
if input_shape is None:
layers = {}
else:
layers = {"Input": {"shape": input_shape}}
for idx, lyr in enumerate(range(num_layers)):
config = {"units": units[idx],
"activation": activation[idx],
}
config.update(kwargs)
_lyr = {f"Dense_{lyr}": config}
layers.update(_lyr)
_dropout = dropout[idx]
if _dropout and _dropout > 0.0:
layers.update({"Dropout": {"rate": _dropout}})
layers.update({"Flatten": {}})
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation
)
return {'layers': layers}
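# A minimal usage sketch (illustrative, not part of the original docs): the
# returned dict is a declarative layer config which, as in the NBeats example
# further below, can be passed to ai4water's ``Model`` as the ``model``
# argument. The feature count below is hypothetical.
# >>> from ai4water import Model
# >>> config = MLP(units=32, num_layers=2, input_shape=(13,))
# >>> model = Model(model=config)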
def LSTM(
units: Union[int, list] = 32,
num_layers:int = 1,
input_shape: tuple = None,
num_outputs:int = 1,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
mode:str = "regression",
output_activation:str = None,
**kwargs
):
"""helper function to make LSTM Model for tensorflow
"""
units, dropout, activation, layers = _precheck(
num_layers, input_shape, units, dropout, activation)
for idx, lyr in enumerate(range(num_layers)):
return_sequences = False
if idx+1 != num_layers:
return_sequences = True
config = {"units": units[idx],
"activation": activation[idx],
"return_sequences": return_sequences,
}
config.update(kwargs)
_lyr = {f"LSTM_{lyr}": config}
layers.update(_lyr)
_dropout = dropout[idx]
if _dropout and _dropout > 0.0:
layers.update({"Dropout": {"rate": _dropout}})
layers.update({"Flatten": {}})
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation
)
return {'layers': layers}
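# A minimal usage sketch (illustrative; hypothetical shapes): ``input_shape``
# is (lookback, num_input_features).
# >>> from ai4water import Model
# >>> config = LSTM(units=32, num_layers=2, input_shape=(10, 3))
# >>> model = Model(model=config, ts_args={'lookback': 10})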
def AttentionLSTM(
units: Union[int, list] = 32,
num_layers: int = 1,
input_shape: tuple = None,
num_outputs: int = 1,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
atten_units:int = 128,
atten_activation:str = "tanh",
mode: str = "regression",
output_activation: str = None,
**kwargs
)->dict:
"""helper function to make AttentionLSTM Model for tensorflow"""
units, dropout, activation, layers = _precheck(
num_layers, input_shape, units, dropout, activation)
for idx, lyr in enumerate(range(num_layers)):
config = {"units": units[idx],
"activation": activation[idx],
"return_sequences": True,
}
config.update(kwargs)
_lyr = {f"LSTM_{lyr}": config}
layers.update(_lyr)
_dropout = dropout[idx]
if _dropout and _dropout > 0.0:
layers.update({"Dropout": {"rate": _dropout}})
attn_layer = {"SelfAttention":
{"config": {"units": atten_units, "activation": atten_activation},
"outputs": ["atten_outputs", "atten_weights"]}}
layers.update(attn_layer)
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation,
inputs="atten_outputs"
)
return {'layers': layers}
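# A minimal usage sketch (illustrative; hypothetical shapes and sizes):
# >>> from ai4water import Model
# >>> config = AttentionLSTM(units=32, input_shape=(10, 3), atten_units=64)
# >>> model = Model(model=config, ts_args={'lookback': 10})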
def CNN(
filters: Union[int, list] = 32,
kernel_size: Union[int, tuple, list] = 3,
convolution_type: str = "1D",
num_layers: int = 1,
padding: Union[str, list] = "same",
strides: Union[int, list]= 1,
pooling_type: Union[str, list] = None,
pool_size: Union[int, list] = 2,
batch_normalization: Union[bool, list] = None,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
input_shape: tuple = None,
num_outputs:int = 1,
mode: str = "regression",
output_activation:str = None,
**kwargs
)->dict:
"""helper function to make convolution neural network based model for tensorflow
"""
assert num_layers>=1
assert convolution_type in ("1D", "2D", "3D")
assert pooling_type in ("MaxPool", "AveragePooling", None)
filters = _check_length(filters, num_layers)
activation = _check_length(activation, num_layers)
padding = _check_length(padding, num_layers)
strides = _check_length(strides, num_layers)
pooling_type = _check_length(pooling_type, num_layers)
pool_size = _check_length(pool_size, num_layers)
kernel_size = _check_length(kernel_size, num_layers)
batch_normalization = _check_length(batch_normalization, num_layers)
dropout = _check_length(dropout, num_layers)
if input_shape is None:
layers = {}
else:
assert len(input_shape) >= 2
layers = {"Input": {"shape": input_shape}}
for idx, lyr in enumerate(range(num_layers)):
pool_type = pooling_type[idx]
batch_norm = batch_normalization[idx]
config = {
"filters": filters[idx],
"kernel_size": kernel_size[idx],
"activation": activation[idx],
"strides": strides[idx],
"padding": padding[idx]
}
config.update(kwargs)
_lyr = {f"Conv{convolution_type}_{lyr}": config}
layers.update(_lyr)
if pool_type:
pool_lyr = f"{pool_type}{convolution_type}"
_lyr = {pool_lyr: {"pool_size": pool_size[idx]}}
layers.update(_lyr)
if batch_norm:
layers.update({"BatchNormalization": {}})
_dropout = dropout[idx]
if _dropout and _dropout > 0.0:
layers.update({"Dropout": {"rate": _dropout}})
layers.update({"Flatten": {}})
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation
)
return {'layers': layers}
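# A minimal usage sketch (illustrative; hypothetical shapes): a two-layer 1D
# CNN with max pooling over a (lookback, num_features) input.
# >>> from ai4water import Model
# >>> config = CNN(filters=16, kernel_size=3, num_layers=2,
# >>>              pooling_type="MaxPool", input_shape=(10, 3))
# >>> model = Model(model=config, ts_args={'lookback': 10})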
def CNNLSTM(
input_shape:tuple,
sub_sequences=3,
cnn_layers:int = 2,
lstm_layers:int = 1,
filters:Union[int, list]=32,
kernel_size: Union[int, tuple, list]=3,
max_pool:bool=False,
units: Union[int, tuple, list] = 32,
num_outputs:int = 1,
mode:str = "regression",
output_activation:str = None,
)->dict:
"""
helper function to make CNNLSTM model for tensorflow
"""
assert len(input_shape) == 2
layers = {"Input": {"shape": input_shape}}
lookback = input_shape[-2]
input_features = input_shape[-1]
time_steps = lookback // sub_sequences
new_shape = sub_sequences, time_steps, input_features
layers.update({"Reshape": {"target_shape": new_shape}})
filters = _check_length(filters, cnn_layers)
kernel_size = _check_length(kernel_size, cnn_layers)
units = _check_length(units, lstm_layers)
for idx, cnn_lyr in enumerate(range(cnn_layers)):
layers.update({f"TimeDistributed_{idx}": {}})
config = {"filters": filters[idx],
"kernel_size": kernel_size[idx],
"padding": "same"}
layers.update({f"Conv1D_{idx}": config})
if max_pool:
layers.update({f"TimeDistributed_mp{idx}": {}})
layers.update({f"MaxPool1D_{idx}": {}})
layers.update({"TimeDistributed_e": {}})
layers.update({'Flatten': {}})
for lstm_lyr in range(lstm_layers):
return_sequences = False
if lstm_lyr+1 != lstm_layers:
return_sequences = True
config = {"units": units[lstm_lyr],
"return_sequences": return_sequences
}
layers.update({f"LSTM_{lstm_lyr}": config})
layers.update({"Flatten": {}})
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation
)
return {"layers": layers}
def LSTMAutoEncoder(
input_shape:tuple,
encoder_layers:int = 1,
decoder_layers:int = 1,
encoder_units: Union[int, list]=32,
decoder_units: Union[int, list]=32,
num_outputs: int = 1,
prediction_mode: bool = True,
mode:str = "regression",
output_activation: str = None,
**kwargs
)->dict:
"""
helper function to make LSTM based AutoEncoder model for tensorflow
"""
assert len(input_shape)>=2
assert encoder_layers >= 1
assert decoder_layers >= 1
lookback = input_shape[-2]
encoder_units = _check_length(encoder_units, encoder_layers)
decoder_units = _check_length(decoder_units, decoder_layers)
layers = {"Input": {"shape": input_shape}}
for idx, enc_lyr in enumerate(range(encoder_layers)):
return_sequences = False
if idx + 1 != encoder_layers:
return_sequences = True
config = {
"units": encoder_units[idx],
"return_sequences": return_sequences
}
lyr = {f"LSTM_e{idx}": config}
layers.update(lyr)
layers.update({'RepeatVector': lookback})
for idx, dec_lyr in enumerate(range(decoder_layers)):
return_sequences = False
if idx + 1 != decoder_layers:
return_sequences = True
config = {
"units": decoder_units[idx],
"return_sequences": return_sequences
}
lyr = {f"LSTM_d{idx}": config}
layers.update(lyr)
layers.update({"Flatten": {}})
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation
)
return {'layers': layers}
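# A minimal usage sketch (illustrative; hypothetical layer sizes):
# >>> from ai4water import Model
# >>> config = LSTMAutoEncoder(input_shape=(10, 3), encoder_layers=2, decoder_layers=2,
# >>>                          encoder_units=[32, 16], decoder_units=[16, 32])
# >>> model = Model(model=config, ts_args={'lookback': 10})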
def TCN(
input_shape,
filters:int = 32,
kernel_size: int = 2,
nb_stacks: int = 1,
dilations = [1, 2, 4, 8, 16, 32],
num_outputs:int = 1,
mode="regression",
output_activation: str = None,
**kwargs
)->dict:
"""helper function for building temporal convolution network with tensorflow
"""
layers = {"Input": {"shape": input_shape}}
config = {'nb_filters': filters,
'kernel_size': kernel_size,
'nb_stacks': nb_stacks,
'dilations': dilations,
'padding': 'causal',
'use_skip_connections': True,
'return_sequences': False,
'dropout_rate': 0.0}
config.update(kwargs)
layers.update({"TCN": config})
layers.update({"Flatten": {}})
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation
)
return {'layers': layers}
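# A minimal usage sketch (illustrative; hypothetical shapes). The "TCN" layer
# referenced in the config is assumed to come from the third-party keras-tcn
# package, which must be installed for the model to build.
# >>> from ai4water import Model
# >>> config = TCN(input_shape=(10, 3), filters=32)
# >>> model = Model(model=config, ts_args={'lookback': 10})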
def TFT(
input_shape,
hidden_units: int = 32,
num_heads: int = 3,
dropout:float = 0.1,
num_outputs:int = 1,
use_cudnn:bool = False,
mode:str="regression",
output_activation:str = None,
)->dict:
"""helper function for temporal fusion transformer based model for tensorflow
"""
num_encoder_steps = input_shape[-2]
input_features = input_shape[-1]
params = {
'total_time_steps': num_encoder_steps,
'num_encoder_steps': num_encoder_steps,
'num_inputs': input_features,
'category_counts': [],
'input_obs_loc': [], # leave empty if not available
'static_input_loc': [], # if not static inputs, leave this empty
'known_regular_inputs': list(range(input_features)),
'known_categorical_inputs': [], # leave empty if not applicable
'hidden_units': hidden_units,
'dropout_rate': dropout,
'num_heads': num_heads,
'use_cudnn': use_cudnn,
'future_inputs': False,
'return_sequences': True,
}
layers = {
"Input": {"config": {"shape": (params['total_time_steps'], input_features)}},
"TemporalFusionTransformer": {"config": params},
"lambda": {"config": tf.keras.layers.Lambda(lambda _x: _x[Ellipsis, -1, :])},
}
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation
)
return {'layers': layers}
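# A minimal usage sketch (illustrative; hypothetical shapes): 48 lookback
# steps and 7 input features, all treated as known regular inputs.
# >>> from ai4water import Model
# >>> config = TFT(input_shape=(48, 7), hidden_units=16, num_heads=2)
# >>> model = Model(model=config, ts_args={'lookback': 48})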
def TabTransformer(
num_numeric_features: int,
cat_vocabulary: dict,
hidden_units=32,
num_heads: int = 4,
depth: int = 4,
dropout: float = 0.1,
num_dense_lyrs: int = 2,
prenorm_mlp:bool = True,
post_norm: bool = True,
final_mlp_units: Union[int, List[int]] = 16,
num_outputs: int = 1,
mode: str = "regression",
output_activation: str = None,
seed:int = 313,
):
"""
TabTransformer for tensorflow
"""
layers = _make_input_lyrs(num_numeric_features, len(cat_vocabulary))
layers.update(
{"TabTransformer": {"config": dict(
cat_vocabulary=cat_vocabulary,
num_numeric_features=num_numeric_features,
hidden_units = hidden_units,
num_heads = num_heads,
depth= depth,
dropout = dropout,
num_dense_lyrs = num_dense_lyrs,
prenorm_mlp = prenorm_mlp,
post_norm = post_norm,
final_mlp_units = final_mlp_units,
seed = seed,
),
"inputs": ["Input_num", "Input_cat"],
"outputs": ['transformer_output', 'imp']}}
)
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation,
inputs="transformer_output"
)
return {"layers": layers}
def FTTransformer(
num_numeric_features:int,
cat_vocabulary:dict=None,
hidden_units = 32,
num_heads: int = 4,
depth:int = 4,
dropout: float = 0.1,
num_dense_lyrs: int = 2,
lookup_kws:dict = None,
post_norm:bool = True,
final_mlp_units:int = 16,
num_outputs: int = 1,
mode: str = "regression",
output_activation: str = None,
seed:int = 313,
)->dict:
"""
FTTransformer for tensorflow
"""
if cat_vocabulary:
layers = _make_input_lyrs(num_numeric_features, len(cat_vocabulary))
inputs = ["Input_num", "Input_cat"]
else:
layers = _make_input_lyrs(num_numeric_features)
inputs = "Input_num"
body = {
"FTTransformer": {"config": dict(
cat_vocabulary=cat_vocabulary,
num_numeric_features=num_numeric_features,
hidden_units=hidden_units,
num_heads=num_heads,
depth=depth,
lookup_kws=lookup_kws,
num_dense_lyrs=num_dense_lyrs,
post_norm=post_norm,
final_mlp_units=final_mlp_units,
dropout=dropout,
seed=seed
),
"inputs": inputs,
"outputs": ['transformer_output', 'imp']}
}
layers.update(body)
layers = _make_output_layer(
layers,
mode,
num_outputs,
output_activation,
inputs='transformer_output'
)
return {"layers": layers}
def _make_input_lyrs(
num_numeric_features:int,
num_cat_features:int=None,
numeric_dtype:tf.DType=tf.float32,
cat_dtype:tf.DType = tf.string
)->dict:
if num_cat_features:
lyrs = {
"Input_num": {"shape": (num_numeric_features,), "dtype": numeric_dtype},
"Input_cat": {"shape": (num_cat_features,), "dtype": cat_dtype}
}
else:
lyrs = {
"Input_num": {"shape": (num_numeric_features,), "dtype": numeric_dtype}
}
return lyrs
def _precheck(num_layers, input_shape, units, dropout, activation):
assert num_layers>=1
if input_shape is None:
layers = {}
else:
layers = {"Input": {"shape": input_shape}}
assert len(input_shape)>=2
units = _check_length(units, num_layers)
dropout = _check_length(dropout, num_layers)
activation = _check_length(activation, num_layers)
return units, dropout, activation, layers | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_tensorflow/_functions.py | _functions.py |
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops import array_ops
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Layer, Multiply
from tensorflow.keras.layers import TimeDistributed, Dense, Dropout, Add, Activation, Lambda
from ai4water.backend import tf
LayerNorm = tf.keras.layers.LayerNormalization
def concatenate(tensors, axis=-1, name="concat"):
"""Concatenates a list of tensors alongside the specified axis.
Args:
tensors: list of tensors to concatenate.
axis: concatenation axis.
name: str,
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
>>> tf.keras.backend.concatenate((a, b), axis=-1)
<tf.Tensor: shape=(3, 6), dtype=int32, numpy=
array([[ 1, 2, 3, 10, 20, 30],
[ 4, 5, 6, 40, 50, 60],
[ 7, 8, 9, 70, 80, 90]], dtype=int32)>
"""
if axis < 0:
rank = K.ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if all(K.is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors, name=name)
elif all(isinstance(x, ragged_tensor.RaggedTensor) for x in tensors):
return array_ops.concat(tensors, axis, name=name)
else:
return array_ops.concat([K.to_dense(x) for x in tensors], axis, name=name)
# Layer utility functions.
def linear_layer(size,
activation=None,
use_time_distributed=False,
use_bias=True,
kernel_constraint=None,
name=None
):
"""Returns simple Keras linear layer.
Args:
size: Output size
activation: Activation function to apply if required
use_time_distributed: Whether to apply layer across time
use_bias: Whether bias should be included in layer
kernel_constraint: optional constraint applied to the Dense layer's kernel
name: name of the layer
"""
linear = tf.keras.layers.Dense(size, activation=activation, use_bias=use_bias, name=name,
kernel_constraint=kernel_constraint)
if use_time_distributed:
linear = TimeDistributed(linear, name=name)
return linear
def apply_gating_layer(x,
hidden_layer_size,
dropout_rate=None,
use_time_distributed=False,
activation=None,
activation_kernel_constraint=None,
gating_kernel_constraint=None,
name=None,
seed=313,
):
"""Applies a Gated Linear Unit (GLU) to an input.
Args:
x: Input to gating layer having shape (num_examples, hidden_units)
hidden_layer_size: Dimension of GLU
dropout_rate: Dropout rate to apply if any
use_time_distributed: Whether to apply across time
activation: Activation function to apply to the linear feature transform if
necessary
name: name of encompassing layers
activation_kernel_constraint: kernel constraint for the activation Dense layer
gating_kernel_constraint: kernel constraint for the gating (sigmoid) Dense layer
Returns:
Tuple of tensors for: (GLU output, gate) where GLU output has the shape (num_examples, hidden_units)
"""
if dropout_rate is not None:
x = Dropout(dropout_rate, name=f'Dropout_{name}', seed=seed)(x)
if use_time_distributed:
activation_layer = TimeDistributed(Dense(hidden_layer_size,
kernel_constraint=activation_kernel_constraint,
activation=activation,
name=f'gating_act_{name}'),
name=f'gating_act_{name}')(x)
gated_layer = TimeDistributed(
Dense(hidden_layer_size,
activation='sigmoid',
kernel_constraint=gating_kernel_constraint,
name=f'gating_{name}'),
name=f'gating_{name}')(x)
else:
activation_layer = Dense(
hidden_layer_size, activation=activation,
kernel_constraint=activation_kernel_constraint,
name=f'gating_act_{name}')(x)
gated_layer = Dense(
hidden_layer_size,
activation='sigmoid',
kernel_constraint=gating_kernel_constraint,
name=f'gating_{name}')(
x)
return Multiply(name=f'MulGating_{name}')([activation_layer, gated_layer]), gated_layer
def add_and_norm(x_list, name=None, norm=True):
"""Applies skip connection followed by layer normalisation.
Args:
x_list: List of inputs to sum for skip connection
name: name of the encompassing layers
norm: whether to apply layer normalisation to the sum
Returns:
Tensor output from layer.
"""
tmp = Add(name=f'add_{name}')(x_list)
if norm:
tmp = LayerNorm(name=f'norm_{name}')(tmp)
return tmp
def gated_residual_network(
x,
hidden_layer_size,
output_size=None,
dropout_rate=None,
use_time_distributed=True,
additional_context=None,
return_gate=False,
activation:str='elu',
kernel_constraint1=None,
kernel_constraint2=None,
kernel_constraint3=None,
gating_kernel_constraint1=None,
gating_kernel_constraint2=None,
norm=True,
name='GRN',
seed=313,
):
"""Applies the gated residual network (GRN) as defined in paper.
Args:
x: Network inputs
hidden_layer_size: Internal state size
output_size: Size of output layer
dropout_rate: Dropout rate if dropout is applied
use_time_distributed: Whether to apply network across time dimension
additional_context: Additional context vector to use if relevant
return_gate: Whether to return GLU gate for diagnostic purposes
name: name of all layers
activation: the kind of activation function to use
kernel_constraint1: kernel constraint to be applied on the skip_connection layer
kernel_constraint2: kernel constraint to be applied on the 1st linear layer before activation
kernel_constraint3: kernel constraint to be applied on the 2nd linear layer after activation
gating_kernel_constraint1: kernel constraint for the activation in the gating layer
gating_kernel_constraint2: kernel constraint for the gating layer
Returns:
Tuple of tensors for: (GRN output, GLU gate)
"""
# Setup skip connection
if output_size is None:
output_size = hidden_layer_size
skip = x
else:
linear = Dense(output_size, name=f'skip_connection_{name}', kernel_constraint=kernel_constraint1)
if use_time_distributed:
linear = TimeDistributed(linear, name=f'skip_connection_{name}')
skip = linear(x)
# Apply feedforward network
hidden = linear_layer(
hidden_layer_size,
activation=None,
use_time_distributed=use_time_distributed,
kernel_constraint=kernel_constraint2,
name=f"ff_{name}"
)(x)
if additional_context is not None:
hidden = hidden + linear_layer(
hidden_layer_size,
activation=None,
use_time_distributed=use_time_distributed,
use_bias=False,
name=f'addition_cntxt_{name}'
)(additional_context)
hidden = Activation(activation, name=f'{name}_{activation}')(hidden)
hidden = linear_layer(
hidden_layer_size,
activation=None,
kernel_constraint=kernel_constraint3,
use_time_distributed=use_time_distributed,
name=f'{name}_LastDense'
)(
hidden)
gating_layer, gate = apply_gating_layer(
hidden,
output_size,
dropout_rate=dropout_rate,
use_time_distributed=use_time_distributed,
activation=None,
activation_kernel_constraint=gating_kernel_constraint1,
gating_kernel_constraint=gating_kernel_constraint2,
name=name,
seed=seed
)
if return_gate:
return add_and_norm([skip, gating_layer], name=name, norm=norm), gate
else:
return add_and_norm([skip, gating_layer], name=name, norm=norm)
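# A minimal sketch of wiring the GRN (illustrative; the symbolic input below
# is hypothetical):
# >>> inp = tf.keras.layers.Input(shape=(24, 16))
# >>> grn_out = gated_residual_network(inp, hidden_layer_size=16,
# >>>                                  dropout_rate=0.1,
# >>>                                  use_time_distributed=True,
# >>>                                  name="grn_example")  # -> (None, 24, 16)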
# Attention Components.
def get_decoder_mask(self_attn_inputs):
"""Returns causal mask to apply for self-attention layer.
Args:
self_attn_inputs: Inputs to self attention layer to determine mask shape
"""
len_s = tf.shape(self_attn_inputs)[1]
bs = tf.shape(self_attn_inputs)[:1]
mask = K.cumsum(tf.eye(len_s, batch_shape=bs), 1)
return mask
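# For illustration, with a sequence length of 3 the mask produced above is
# lower triangular for every batch element, i.e. each position can only
# attend to itself and to earlier positions:
# [[1., 0., 0.],
#  [1., 1., 0.],
#  [1., 1., 1.]]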
class ScaledDotProductAttention(tf.keras.layers.Layer):
"""Defines scaled dot product attention layer.
Attributes:
dropout: Dropout rate to use
activation: Normalisation function for scaled dot product attention (e.g.
softmax by default)
"""
def __init__(self, attn_dropout=0.0, seed=313, **kwargs):
self.dropout = Dropout(attn_dropout, name="ScaledDotProdAtten_dropout", seed=seed)
self.activation = Activation('softmax', name="ScaledDotProdAtten_softmax")
super().__init__(**kwargs)
def __call__(self, q, k, v, mask, idx):
"""Applies scaled dot product attention.
Args:
q: Queries
k: Keys
v: Values
mask: Masking if required -- sets softmax to very large value
Returns:
Tuple of (layer outputs, attention weights)
"""
temper = tf.sqrt(tf.cast(tf.shape(k)[-1], dtype='float32'))
attn = MyLambda(name=f"ScaledDotProdAttenLambda{idx}")([q, k], temper=temper)
if mask is not None:
mmask = Lambda(lambda x: (-1e+9) * (1. - K.cast(x, 'float32')), name=f"ScaledDotProdAttenLambdaMask{idx}")(
mask) # setting to infinity
attn = Add(name=f'SDPA_ADD_{idx}')([attn, mmask])
attn = self.activation(attn)
attn = self.dropout(attn)
output = Lambda(lambda x: K.batch_dot(x[0], x[1]), name=f"ScaledDotProdAttenOutput{idx}")([attn, v])
return output, attn
class MyLambda(Layer):
def __init__(self, name="ScaledDotProdAttenLambda", *args, **kwargs):
super(MyLambda, self).__init__(*args, name=name, **kwargs)
def call(self, x, temper, *args, **kwargs):
return K.batch_dot(x[0], x[1], axes=[2, 2]) / temper
class InterpretableMultiHeadAttention(tf.keras.layers.Layer):
"""Defines interpretable multi-head attention layer.
Attributes:
n_head: Number of heads
d_k: Key/query dimensionality per head
d_v: Value dimensionality
dropout: Dropout rate to apply
qs_layers: List of queries across heads
ks_layers: List of keys across heads
vs_layers: List of values across heads
attention: Scaled dot product attention layer
w_o: Output weight matrix to project internal state to the original TFT
state size
"""
def __init__(self, n_head, d_model, dropout, seed=313, **kwargs):
"""Initialises layer.
Args:
n_head: Number of heads
d_model: TFT state dimensionality
dropout: Dropout discard rate
"""
self.n_head = n_head
self.d_k = self.d_v = d_k = d_v = d_model // n_head
self.dropout = dropout
self.seed=seed
self.qs_layers = []
self.ks_layers = []
self.vs_layers = []
# Use same value layer to facilitate interp
vs_layer = Dense(d_v, use_bias=False)
for _ in range(n_head):
self.qs_layers.append(Dense(d_k, use_bias=False))
self.ks_layers.append(Dense(d_k, use_bias=False))
self.vs_layers.append(vs_layer) # use same vs_layer
self.attention = ScaledDotProductAttention(name="ScaledDotProdAtten", seed=seed)
self.w_o = Dense(d_model, use_bias=False, name="MH_atten_output")
super().__init__(**kwargs)
def __call__(self, q, k, v, mask=None):
"""Applies interpretable multihead attention.
Using T to denote the number of time steps fed into the transformer.
Args:
q: Query tensor of shape=(?, T, d_model)
k: Key of shape=(?, T, d_model)
v: Values of shape=(?, T, d_model)
mask: Masking if required with shape=(?, T, T)
Returns:
Tuple of (layer outputs, attention weights)
"""
n_head = self.n_head
heads = []
attns = []
for i in range(n_head):
qs = self.qs_layers[i](q)
ks = self.ks_layers[i](k)
vs = self.vs_layers[i](v)
head, attn = self.attention(qs, ks, vs, mask, i)
head_dropout = Dropout(self.dropout, seed=self.seed)(head)
heads.append(head_dropout)
attns.append(attn)
head = array_ops.stack(heads, axis=0, name="MultiHeadAtten_heads") if n_head > 1 else heads[0]
attn = array_ops.stack(attns, axis=0, name="MultiHeadAttention_attens")
_outputs = K.mean(head, axis=0) if n_head > 1 else head
_outputs = self.w_o(_outputs)
_outputs = Dropout(self.dropout, name="MHA_output_do", seed=self.seed)(_outputs) # output dropout
return _outputs, attn
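# Hedged usage sketch (shapes are illustrative; d_model is split evenly across heads):
# >>> import tensorflow as tf
# >>> q = k = v = tf.random.normal((8, 24, 16))          # (batch, T, d_model)
# >>> mha = InterpretableMultiHeadAttention(n_head=4, d_model=16, dropout=0.1)
# >>> out, attn = mha(q, k, v, mask=get_decoder_mask(q))
# >>> out.shape, attn.shape                               # roughly (8, 24, 16) and (4, 8, 24, 24)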
# Loss functions.
def tensorflow_quantile_loss(y, y_pred, quantile):
"""Computes quantile loss for tensorflow.
Standard quantile loss as defined in the "Training Procedure" section of
the main TFT paper
Args:
y: Targets
y_pred: Predictions
quantile: Quantile to use for loss calculations (between 0 & 1)
Returns:
Tensor for quantile loss.
"""
# Checks quantile
if quantile < 0 or quantile > 1:
raise ValueError(
'Illegal quantile value={}! Values should be between 0 and 1.'.format(quantile))
prediction_underflow = y - y_pred
q_loss = quantile * tf.maximum(prediction_underflow, 0.) + (
1. - quantile) * tf.maximum(-prediction_underflow, 0.)
return tf.reduce_sum(q_loss, axis=-1)
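# Hedged worked example of the quantile loss above (numbers are illustrative):
# >>> import tensorflow as tf
# >>> y = tf.constant([[1.0, 2.0]])
# >>> y_pred = tf.constant([[0.5, 3.0]])
# >>> tensorflow_quantile_loss(y, y_pred, quantile=0.9)
# element-wise: 0.9*max(1.0-0.5, 0) = 0.45 and (1-0.9)*max(3.0-2.0, 0) = 0.10,
# summed over the last axis -> roughly [0.55]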
def quantile_loss(a, b, quantiles, output_size):
"""Returns quantile loss for specified quantiles.
Args:
a: Targets
b: Predictions
quantiles: Quantiles for which the loss is computed (each between 0 & 1)
output_size: Number of outputs (target variables) per quantile in the prediction tensor
"""
quantiles_used = set(quantiles)
loss = 0.
for i, quantile in enumerate(quantiles):
if quantile in quantiles_used:
loss += tensorflow_quantile_loss(
a[Ellipsis, output_size * i:output_size * (i + 1)],
b[Ellipsis, output_size * i:output_size * (i + 1)], quantile)
return loss | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_tensorflow/utils.py | utils.py |
from ai4water.backend import keras, np
K = keras.backend
Concatenate, Add, Reshape = keras.layers.Concatenate, keras.layers.Add, keras.layers.Reshape
Input, Dense, Lambda, Subtract = keras.layers.Input, keras.layers.Dense, keras.layers.Lambda, keras.layers.Subtract
Model = keras.models.Model
class NBeats(keras.layers.Layer):
"""
This implementation is the same as that of Philippe Remy (peremy_) with a few modifications.
Here NBeats can be used as a layer. The output shape will be
(batch_size, forecast_length, input_dim)
Some other changes have also been made to make this layer compatible with ai4water.
Example
-------
>>> x = np.random.random((100, 10, 3))
>>> y = np.random.random((100, 1))
...
>>> model = Model(model={"layers":
>>> {"Input": {"shape": (10, 3)},
>>> "NBeats": {"lookback": 10, "forecast_length": 1, "num_exo_inputs": 2},
>>> "Flatten": {},
>>> "Reshape": {"target_shape": (1,1)}}},
>>> ts_args={'lookback':10})
...
>>> model.fit(x=x, y=y.reshape(-1,1,1))
.. _peremy:
https://github.com/philipperemy/n-beats/tree/master/nbeats_keras
"""
GENERIC_BLOCK = 'generic'
TREND_BLOCK = 'trend'
SEASONALITY_BLOCK = 'seasonality'
def __init__(
self,
units: int = 256,
lookback: int = 10,
forecast_len: int = 2,
stack_types=(TREND_BLOCK, SEASONALITY_BLOCK),
nb_blocks_per_stack=3,
thetas_dim=(4, 8),
share_weights_in_stack=False,
nb_harmonics=None,
num_inputs=1,
num_exo_inputs=0,
**kwargs
):
"""
Initiates the NBeats layer
Arguments:
units :
Number of units in NBeats layer. It determines the size of NBeats.
lookback:
Number of historical time-steps used to predict next value
forecast_len :
Number of future time-steps to forecast
stack_types :
Types of stacks to use; any combination of 'generic', 'trend' and 'seasonality'
nb_blocks_per_stack :
Number of blocks in each stack
thetas_dim :
Dimensionality of the expansion (theta) coefficients for each stack
share_weights_in_stack :
Whether the blocks within a stack share their weights
nb_harmonics :
Number of harmonics used by the seasonality blocks; defaults to forecast_len if not given
num_inputs:
Number of target (endogenous) input features; currently only 1 is supported
num_exo_inputs:
Number of exogenous input features
kwargs :
Additional keyword arguments passed to the parent keras Layer
"""
if num_inputs != 1:
raise NotImplementedError
self.stack_types = stack_types
self.nb_blocks_per_stack = nb_blocks_per_stack
self.thetas_dim = thetas_dim
self.units = units
self.share_weights_in_stack = share_weights_in_stack
self.lookback = lookback
self.forecast_length = forecast_len
self.input_dim = num_inputs
self.exo_dim = num_exo_inputs
self.exo_shape = (self.lookback, self.exo_dim)
self._weights = {}
self.nb_harmonics = nb_harmonics
assert len(self.stack_types) == len(self.thetas_dim)
super().__init__(**kwargs)
def __call__(self, inputs, *args, **kwargs):
"""The first num_inputs from inputs will be considered as the same feature
which is being predicted. Since ai4water, by default, does nowcasting
instead of forecasting, and NBeats uses the target/ground truth as input,
using NBeats for nowcasting does not make sense.
For example, if the inputs are of shape (100, 5, 3), then the
inputs consist of three features and we consider the first
one as the target feature and the other two as exogenous features.
"""
e_ = {}
if self.has_exog():
x = inputs[..., 0:self.input_dim]
e = inputs[..., self.input_dim:]
for k in range(self.exo_dim):
e_[k] = Lambda(lambda z: z[..., k])(e)
else:
x = inputs
x_ = {}
for k in range(self.input_dim):
x_[k] = Lambda(lambda z: z[..., k])(x)
y_ = {}
for stack_id in range(len(self.stack_types)):
stack_type = self.stack_types[stack_id]
nb_poly = self.thetas_dim[stack_id]
for block_id in range(self.nb_blocks_per_stack):
backcast, forecast = self.create_block(x_, e_, stack_id, block_id, stack_type, nb_poly)
for k in range(self.input_dim):
x_[k] = Subtract()([x_[k], backcast[k]])
if stack_id == 0 and block_id == 0:
y_[k] = forecast[k]
else:
y_[k] = Add()([y_[k], forecast[k]])
for k in range(self.input_dim):
y_[k] = Reshape(target_shape=(self.forecast_length, 1))(y_[k])
if self.input_dim > 1:
y_ = Concatenate(axis=-1)([y_[ll] for ll in range(self.input_dim)])
else:
y_ = y_[0]
return y_
def has_exog(self):
return self.exo_dim > 0
def _r(self, layer_with_weights, stack_id):
# mechanism to restore weights when blocks share the same weights.
# only useful when share_weights_in_stack=True.
if self.share_weights_in_stack:
layer_name = layer_with_weights.name.split('/')[-1]
try:
reused_weights = self._weights[stack_id][layer_name]
return reused_weights
except KeyError:
pass
if stack_id not in self._weights:
self._weights[stack_id] = {}
self._weights[stack_id][layer_name] = layer_with_weights
return layer_with_weights
def create_block(self, x, e, stack_id, block_id, stack_type, nb_poly):
# register weights (useful when share_weights_in_stack=True)
def reg(layer):
return self._r(layer, stack_id)
# update name (useful when share_weights_in_stack=True)
def n(layer_name):
return '/'.join([str(stack_id), str(block_id), stack_type, layer_name])
backcast_ = {}
forecast_ = {}
d1 = reg(Dense(self.units, activation='relu', name=n('d1')))
d2 = reg(Dense(self.units, activation='relu', name=n('d2')))
d3 = reg(Dense(self.units, activation='relu', name=n('d3')))
d4 = reg(Dense(self.units, activation='relu', name=n('d4')))
if stack_type == 'generic':
theta_b = reg(Dense(nb_poly, activation='linear', use_bias=False, name=n('theta_b')))
theta_f = reg(Dense(nb_poly, activation='linear', use_bias=False, name=n('theta_f')))
backcast = reg(Dense(self.lookback, activation='linear', name=n('backcast')))
forecast = reg(Dense(self.forecast_length, activation='linear', name=n('forecast')))
elif stack_type == 'trend':
theta_f = theta_b = reg(Dense(nb_poly, activation='linear', use_bias=False, name=n('theta_f_b')))
backcast = Lambda(trend_model, arguments={"is_forecast": False, "backcast_length": self.lookback,
"forecast_length": self.forecast_length})
forecast = Lambda(trend_model, arguments={"is_forecast": True, "backcast_length": self.lookback,
"forecast_length": self.forecast_length})
else: # 'seasonality'
if self.nb_harmonics:
theta_b = reg(Dense(self.nb_harmonics, activation='linear', use_bias=False, name=n('theta_b')))
else:
theta_b = reg(Dense(self.forecast_length, activation='linear', use_bias=False, name=n('theta_b')))
theta_f = reg(Dense(self.forecast_length, activation='linear', use_bias=False, name=n('theta_f')))
backcast = Lambda(seasonality_model,
arguments={"is_forecast": False, "backcast_length": self.lookback,
"forecast_length": self.forecast_length})
forecast = Lambda(seasonality_model,
arguments={"is_forecast": True, "backcast_length": self.lookback,
"forecast_length": self.forecast_length})
for k in range(self.input_dim):
if self.has_exog():
d0 = Concatenate()([x[k]] + [e[ll] for ll in range(self.exo_dim)])
else:
d0 = x[k]
d1_ = d1(d0)
d2_ = d2(d1_)
d3_ = d3(d2_)
d4_ = d4(d3_)
theta_f_ = theta_f(d4_)
theta_b_ = theta_b(d4_)
backcast_[k] = backcast(theta_b_)
forecast_[k] = forecast(theta_f_)
return backcast_, forecast_
def linear_space(backcast_length, forecast_length, fwd_looking=True):
ls = K.arange(-float(backcast_length), float(forecast_length), 1) / backcast_length
if fwd_looking:
ls = ls[backcast_length:]
else:
ls = ls[:backcast_length]
return ls
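# Hedged sketch of the time grids produced by linear_space (backcast_length=4, forecast_length=2):
# >>> linear_space(4, 2, fwd_looking=False)    # backcast grid, roughly [-1.0, -0.75, -0.5, -0.25]
# >>> linear_space(4, 2, fwd_looking=True)     # forecast grid, roughly [0.0, 0.25]
# these grids are the `t` values on which the trend/seasonality bases below are evaluated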
def seasonality_model(thetas, backcast_length, forecast_length, is_forecast):
p = thetas.get_shape().as_list()[-1]
p1, p2 = (p // 2, p // 2) if p % 2 == 0 else (p // 2, p // 2 + 1)
t = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
s1 = K.stack([K.cos(2 * np.pi * i * t) for i in range(p1)], axis=0)
s2 = K.stack([K.sin(2 * np.pi * i * t) for i in range(p2)], axis=0)
if p == 1:
s = s2
else:
s = K.concatenate([s1, s2], axis=0)
s = K.cast(s, np.float32)
return K.dot(thetas, s)
def trend_model(thetas, backcast_length, forecast_length, is_forecast):
p = thetas.shape[-1]
t = linear_space(backcast_length, forecast_length, fwd_looking=is_forecast)
t = K.transpose(K.stack([t ** i for i in range(p)], axis=0))
t = K.cast(t, np.float32)
return K.dot(thetas, K.transpose(t)) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_tensorflow/nbeats_keras.py | nbeats_keras.py |
from typing import List
from ai4water.backend import torch
if torch is None:
    nn = None
    Tensor = None
else:
    nn = torch.nn
    # Tensor and List are needed for the torch.jit.annotate calls below
    Tensor = torch.Tensor
class IMVTensorLSTM(torch.jit.ScriptModule):
__constants__ = ["n_units", "input_dim"]
def __init__(self, input_dim, output_dim, n_units, device, init_std=0.02):
super().__init__()
self.U_j = nn.Parameter(torch.randn(input_dim, 1, n_units) * init_std)
self.U_i = nn.Parameter(torch.randn(input_dim, 1, n_units) * init_std)
self.U_f = nn.Parameter(torch.randn(input_dim, 1, n_units) * init_std)
self.U_o = nn.Parameter(torch.randn(input_dim, 1, n_units) * init_std)
self.W_j = nn.Parameter(torch.randn(input_dim, n_units, n_units) * init_std)
self.W_i = nn.Parameter(torch.randn(input_dim, n_units, n_units) * init_std)
self.W_f = nn.Parameter(torch.randn(input_dim, n_units, n_units) * init_std)
self.W_o = nn.Parameter(torch.randn(input_dim, n_units, n_units) * init_std)
self.b_j = nn.Parameter(torch.randn(input_dim, n_units) * init_std)
self.b_i = nn.Parameter(torch.randn(input_dim, n_units) * init_std)
self.b_f = nn.Parameter(torch.randn(input_dim, n_units) * init_std)
self.b_o = nn.Parameter(torch.randn(input_dim, n_units) * init_std)
self.F_alpha_n = nn.Parameter(torch.randn(input_dim, n_units, 1) * init_std)
self.F_alpha_n_b = nn.Parameter(torch.randn(input_dim, 1) * init_std)
self.F_beta = nn.Linear(2 * n_units, 1)
self.Phi = nn.Linear(2 * n_units, output_dim)
self.n_units = n_units
self.input_dim = input_dim
self.device = device
@torch.jit.script_method
def forward(self, x):
h_tilda_t = torch.zeros(x.shape[0], self.input_dim, self.n_units).to(self.device)
c_tilda_t = torch.zeros(x.shape[0], self.input_dim, self.n_units).to(self.device)
outputs = torch.jit.annotate(List[Tensor], [])
for t in range(x.shape[1]):
# eq 1
j_tilda_t = torch.tanh(torch.einsum("bij,ijk->bik", h_tilda_t, self.W_j) +
torch.einsum("bij,jik->bjk", x[:, t, :].unsqueeze(1), self.U_j) + self.b_j)
# eq 5
i_tilda_t = torch.sigmoid(torch.einsum("bij,ijk->bik", h_tilda_t, self.W_i) +
torch.einsum("bij,jik->bjk", x[:, t, :].unsqueeze(1), self.U_i) + self.b_i)
f_tilda_t = torch.sigmoid(torch.einsum("bij,ijk->bik", h_tilda_t, self.W_f) +
torch.einsum("bij,jik->bjk", x[:, t, :].unsqueeze(1), self.U_f) + self.b_f)
o_tilda_t = torch.sigmoid(torch.einsum("bij,ijk->bik", h_tilda_t, self.W_o) +
torch.einsum("bij,jik->bjk", x[:, t, :].unsqueeze(1), self.U_o) + self.b_o)
# eq 6
c_tilda_t = c_tilda_t * f_tilda_t + i_tilda_t * j_tilda_t
# eq 7
h_tilda_t = (o_tilda_t * torch.tanh(c_tilda_t))
outputs += [h_tilda_t]
outputs = torch.stack(outputs)
outputs = outputs.permute(1, 0, 2, 3)
# eq 8
alphas = torch.tanh(torch.einsum("btij,ijk->btik", outputs, self.F_alpha_n) + self.F_alpha_n_b)
alphas = torch.exp(alphas)
alphas = alphas / torch.sum(alphas, dim=1, keepdim=True)
g_n = torch.sum(alphas * outputs, dim=1)
hg = torch.cat([g_n, h_tilda_t], dim=2)
mu = self.Phi(hg)
betas = torch.tanh(self.F_beta(hg))
betas = torch.exp(betas)
betas = betas / torch.sum(betas, dim=1, keepdim=True)
mean = torch.sum(betas * mu, dim=1)
return mean, alphas, betas
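# A minimal, hedged usage sketch of IMVTensorLSTM (random data, CPU device assumed):
# >>> import torch
# >>> net = IMVTensorLSTM(input_dim=5, output_dim=1, n_units=16, device=torch.device("cpu"))
# >>> x = torch.randn(8, 12, 5)                      # (batch, time steps, input_dim)
# >>> mean, alphas, betas = net(x)
# >>> mean.shape, alphas.shape, betas.shape          # roughly (8, 1), (8, 12, 5, 1), (8, 5, 1)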
class IMVFullLSTM(torch.jit.ScriptModule):
__constants__ = ["n_units", "input_dim"]
def __init__(self, input_dim, output_dim, n_units, device, init_std=0.02):
super().__init__()
self.U_j = nn.Parameter(torch.randn(input_dim, 1, n_units) * init_std)
self.W_j = nn.Parameter(torch.randn(input_dim, n_units, n_units) * init_std)
self.b_j = nn.Parameter(torch.randn(input_dim, n_units) * init_std)
self.W_i = nn.Linear(input_dim * (n_units + 1), input_dim * n_units)
self.W_f = nn.Linear(input_dim * (n_units + 1), input_dim * n_units)
self.W_o = nn.Linear(input_dim * (n_units + 1), input_dim * n_units)
self.F_alpha_n = nn.Parameter(torch.randn(input_dim, n_units, 1) * init_std)
self.F_alpha_n_b = nn.Parameter(torch.randn(input_dim, 1) * init_std)
self.F_beta = nn.Linear(2 * n_units, 1)
self.Phi = nn.Linear(2 * n_units, output_dim)
self.n_units = n_units
self.input_dim = input_dim
self.device = device
@torch.jit.script_method
def forward(self, x):
h_tilda_t = torch.zeros(x.shape[0], self.input_dim, self.n_units).to(self.device)
c_t = torch.zeros(x.shape[0], self.input_dim * self.n_units).to(self.device)
outputs = torch.jit.annotate(List[Tensor], [])
for t in range(x.shape[1]):
# eq 1
j_tilda_t = torch.tanh(torch.einsum("bij,ijk->bik", h_tilda_t, self.W_j) +
torch.einsum("bij,jik->bjk", x[:, t, :].unsqueeze(1), self.U_j) + self.b_j)
inp = torch.cat([x[:, t, :], h_tilda_t.view(h_tilda_t.shape[0], -1)], dim=1)
# eq 2
i_t = torch.sigmoid(self.W_i(inp))
f_t = torch.sigmoid(self.W_f(inp))
o_t = torch.sigmoid(self.W_o(inp))
# eq 3
c_t = c_t * f_t + i_t * j_tilda_t.view(j_tilda_t.shape[0], -1)
# eq 4
h_tilda_t = (o_t * torch.tanh(c_t)).view(h_tilda_t.shape[0], self.input_dim, self.n_units)
outputs += [h_tilda_t]
outputs = torch.stack(outputs)
outputs = outputs.permute(1, 0, 2, 3)
# eq 8
alphas = torch.tanh(torch.einsum("btij,ijk->btik", outputs, self.F_alpha_n) + self.F_alpha_n_b)
alphas = torch.exp(alphas)
alphas = alphas / torch.sum(alphas, dim=1, keepdim=True)
g_n = torch.sum(alphas * outputs, dim=1)
hg = torch.cat([g_n, h_tilda_t], dim=2)
mu = self.Phi(hg)
betas = torch.tanh(self.F_beta(hg))
betas = torch.exp(betas)
betas = betas / torch.sum(betas, dim=1, keepdim=True)
mean = torch.sum(betas * mu, dim=1)
return mean, alphas, betas | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_torch/imv_networks.py | imv_networks.py |
from ai4water.backend import torch
class HSGLayer(torch.nn.Module):
def __init__(self, n_units, init_gates_closed):
super(HSGLayer, self).__init__()
self.W_R = torch.nn.Linear(n_units, n_units, bias=False)
self.W_F = torch.nn.Linear(n_units, n_units)
if init_gates_closed:
self.W_F.bias = torch.nn.Parameter(torch.Tensor([-2.5]*n_units).cuda())
def forward(self, s_l_t, s_prime_tm1):
g = torch.sigmoid(self.W_R(s_prime_tm1) + self.W_F(s_l_t))
s_prime_t = g*s_prime_tm1 + (1 - g)*s_l_t
return s_prime_t
class RHNCell(torch.nn.Module):
def __init__(self, in_feats, n_units, rec_depth=3, couple_gates=True,
use_hsg=False, init_gates_closed=False):
super(RHNCell, self).__init__()
self.rec_depth = rec_depth
self.in_feats = in_feats
self.n_units = n_units
self.couple_gates = couple_gates
self.use_HSG = use_hsg
self.W_H = torch.nn.Linear(in_feats, n_units, bias=False)
self.W_T = torch.nn.Linear(in_feats, n_units, bias=False)
if not couple_gates:
self.W_C = torch.nn.Linear(in_feats, n_units, bias=False)
self.R_H = torch.nn.ModuleList([torch.nn.Linear(n_units, n_units) for _ in range(rec_depth)])
self.R_T = torch.nn.ModuleList([torch.nn.Linear(n_units, n_units) for _ in range(rec_depth)])
if not couple_gates:
self.R_C = torch.nn.ModuleList([torch.nn.Linear(n_units, n_units) for _ in range(rec_depth)])
if use_hsg:
self.HSG = HSGLayer(n_units, init_gates_closed)
if init_gates_closed:
for l in range(rec_depth):
self.R_T[l].bias = torch.nn.Parameter(torch.Tensor([-2.5] * n_units).cuda())
if not couple_gates:
self.R_C[l].bias = torch.nn.Parameter(torch.Tensor([-2.5] * n_units).cuda())
def forward(self, x, s):
if self.use_HSG:
s_prime_tm1 = s
preds = []
for l in range(self.rec_depth):
if l == 0:
h_l_t = torch.tanh(self.W_H(x) + self.R_H[l](s))
t_l_t = torch.sigmoid(self.W_T(x) + self.R_T[l](s))
if not self.couple_gates:
c_l_t = torch.sigmoid(self.W_C(x) + self.R_C[l](s))
else:
h_l_t = torch.tanh(self.R_H[l](s))
t_l_t = torch.sigmoid(self.R_T[l](s))
if not self.couple_gates:
c_l_t = torch.sigmoid(self.R_C[l](s))
if not self.couple_gates:
s = h_l_t * t_l_t + c_l_t * s
else:
s = h_l_t * t_l_t + (1 - t_l_t) * s
preds.append(s)
if self.use_HSG:
s = self.HSG(s, s_prime_tm1)
preds.pop()
preds.append(s)
preds = torch.stack(preds)
return s, preds
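# Hedged usage sketch of one recurrent highway step (random tensors assumed, CPU):
# >>> import torch
# >>> cell = RHNCell(in_feats=8, n_units=32, rec_depth=3)
# >>> x_t, s = torch.randn(16, 8), torch.zeros(16, 32)
# >>> s, all_s = cell(x_t, s)      # s: (16, 32); all_s stacks the state after each recurrence depth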
class RHN(torch.nn.Module):
def __init__(self, in_feats, out_feats, n_units=32, rec_depth=3, couple_gates=True, use_hsg=False,
init_gates_closed=False, use_batch_norm=False):
super(RHN, self).__init__()
assert rec_depth > 0
self.rec_depth = rec_depth
self.in_feats = in_feats
self.n_units = n_units
self.init_gates_closed = init_gates_closed
self.couple_gates = couple_gates
self.use_HSG = use_hsg
self.use_batch_norm = use_batch_norm
self.RHNCell = RHNCell(in_feats, n_units, rec_depth, couple_gates=couple_gates,
use_hsg=use_hsg, init_gates_closed=init_gates_closed)
if use_batch_norm:
self.bn_x = torch.nn.BatchNorm1d(in_feats)
self.bn_s = torch.nn.BatchNorm1d(n_units)
def forward(self, x):
s = torch.zeros(x.shape[0], self.n_units).cuda()
preds = []
highway_states = []
for t in range(x.shape[1]):
if self.use_batch_norm:
x_inp = self.bn_x(x[:, t, :])
s = self.bn_s(s)
else:
x_inp = x[:, t, :]
s, all_s = self.RHNCell(x_inp, s)
preds.append(s)
highway_states.append(all_s)
preds = torch.stack(preds)
preds = preds.permute(1, 0, 2)
highway_states = torch.stack(highway_states)
highway_states = highway_states.permute(2, 0, 3, 1)
out = preds
return out, highway_states
class ConvBlock(torch.nn.Module):
def __init__(self, timesteps, in_channels, n_filters=32, filter_size=5):
super(ConvBlock, self).__init__()
padding1 = self._calc_padding(timesteps, filter_size)
self.conv = torch.nn.Conv1d(in_channels, n_filters, filter_size, padding=padding1)
self.relu = torch.nn.ReLU()
self.maxpool = torch.nn.AdaptiveMaxPool1d(timesteps)
self.zp = torch.nn.ConstantPad1d((1, 0), 0)
def _calc_padding(self, lin, kernel, stride=1, dilation=1):
p = int(((lin - 1) * stride + 1 + dilation * (kernel - 1) - lin) / 2)
return p
def forward(self, x):
x = x.permute(0, 2, 1)
x = self.conv(x)
x = self.relu(x)
x = self.maxpool(x)
x = x.permute(0, 2, 1)
return x
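# Hedged usage sketch of ConvBlock (shapes are illustrative):
# >>> import torch
# >>> block = ConvBlock(timesteps=24, in_channels=7, n_filters=32, filter_size=5)
# >>> block(torch.randn(16, 24, 7)).shape            # padding + adaptive pooling keep the time axis
# torch.Size([16, 24, 32])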
class HARHN(torch.nn.Module):
def __init__(self, n_conv_layers, lookback, in_feats, target_feats, n_units_enc=32, n_units_dec=32,
enc_input_size=32,
rec_depth=3,
use_predicted_output=True,
out_feats=1, n_filters=32, filter_size=5):
super(HARHN, self).__init__()
assert n_conv_layers > 0
self.n_convs = n_conv_layers
self.n_units_enc = n_units_enc
self.n_units_dec = n_units_dec
self.rec_depth = rec_depth
self.T = lookback
self.use_predicted_output = use_predicted_output
self.convs = torch.nn.ModuleList([ConvBlock(lookback, in_feats, n_filters=n_filters,
filter_size=filter_size) if i == 0 else ConvBlock(lookback, n_filters,
n_filters=n_filters,
filter_size=filter_size)
for i in range(n_conv_layers)])
self.conv_to_enc = torch.nn.Linear(n_filters, enc_input_size)
self.RHNEncoder = RHN(enc_input_size, out_feats=n_units_enc, n_units=n_units_enc, rec_depth=rec_depth)
self.RHNDecoder = RHNCell(target_feats, n_units_dec, rec_depth=rec_depth)
self.T_k = torch.nn.ModuleList([torch.nn.Linear(n_units_dec, n_units_enc, bias=False) for _ in range(self.rec_depth)])
self.U_k = torch.nn.ModuleList([torch.nn.Linear(n_units_enc, n_units_enc) for _ in range(self.rec_depth)])
self.v_k = torch.nn.ModuleList([torch.nn.Linear(n_units_enc, 1) for _ in range(self.rec_depth)])
self.W_tilda = torch.nn.Linear(target_feats, target_feats, bias=False)
self.V_tilda = torch.nn.Linear(rec_depth * n_units_enc, target_feats)
self.W = torch.nn.Linear(n_units_dec, target_feats)
self.V = torch.nn.Linear(rec_depth * n_units_enc, target_feats)
def forward(self, x, y_prev_t):
if self.use_predicted_output:
y_prev_t = y_prev_t[0:x.shape[0]]
for conv in range(self.n_convs):
x = self.convs[conv](x)
x = self.conv_to_enc(x)
x, h_t_l = self.RHNEncoder(x) # h_T_L.shape = (batch_size, T, n_units_enc, rec_depth)
s = torch.zeros(x.shape[0], self.n_units_dec).cuda()
for t in range(self.T):
s_rep = s.unsqueeze(1)
s_rep = s_rep.repeat(1, self.T, 1)
d_t = []
for k in range(self.rec_depth):
h_t_k = h_t_l[..., k]
_ = self.U_k[k](h_t_k)
_ = self.T_k[k](s_rep)
e_t_k = self.v_k[k](torch.tanh(self.T_k[k](s_rep) + self.U_k[k](h_t_k)))
alpha_t_k = torch.softmax(e_t_k, 1)
d_t_k = torch.sum(h_t_k * alpha_t_k, dim=1)
d_t.append(d_t_k)
d_t = torch.cat(d_t, dim=1)
if self.use_predicted_output:
_, s, y_prev_t = self._last_v2(y_prev_t, d_t, s)
else:
_, s, _ = self._last_v1(y_prev_t, d_t, s, t)
y_t = self.W(s) + self.V(d_t)
return y_t, y_prev_t
def _last_v2(self, y_prev_t, d_t, s):
y_tilda_t = self.W_tilda(y_prev_t) + self.V_tilda(d_t)
s, _ = self.RHNDecoder(y_tilda_t, s)
y_prev_t = self.W(s) + self.V(d_t)
return y_tilda_t, s, y_prev_t
def _last_v1(self, y_prev_t, d_t, s, t):
y_tilda_t = self.W_tilda(y_prev_t) + self.V_tilda(d_t)
s, _ = self.RHNDecoder(y_tilda_t[:, t, :], s)
return y_tilda_t, s, y_prev_t | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_torch/HARHN.py | HARHN.py |
from typing import Union
from ..utils import _check_length
def MLP(
units: Union[int, list] = 32,
num_layers: int = 1,
input_shape: tuple = None,
num_outputs: int = 1,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
mode: str = "regression",
output_activation: str = None,
**kwargs
):
"""makes layers for MLP model for tensorflow"""
assert num_layers>=1
assert input_shape is not None
if isinstance(units, int):
units = [units]*num_layers
if not isinstance(activation, list):
activation = [activation] * num_layers
if not isinstance(dropout, list):
dropout = [dropout] * num_layers
in_feat = input_shape[-1]
layers = {}
for i in range(num_layers):
    # the i-th hidden layer maps in_feat -> units[i]
    out_feat = units[i]
    lyr = {f"Linear_{i}": {"in_features": in_feat, "out_features": out_feat}}
    layers.update(lyr)
    in_feat = out_feat
    if activation[i]:
        layers.update({f'{activation[i]}_{i}': {}})
    if dropout[i]:
        layers.update({f'Dropout_{i}': dropout[i]})
layers = _make_output_layer(
layers,
mode,
out_feat,
num_outputs,
output_activation
)
return {"layers": layers}
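# Hedged usage sketch; the returned dictionary is intended to be passed as the ``model``
# argument of ai4water's Model with the pytorch backend (assumption based on how these
# helpers are used elsewhere in the package):
# >>> MLP(units=32, num_layers=1, input_shape=(13,), num_outputs=1)
# {'layers': {'Linear_0': {'in_features': 13, 'out_features': 32},
#             'Linear_out': {'in_features': 32, 'out_features': 1}}}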
def LSTM(
units: Union[int, list] = 32,
num_layers: int = 1,
input_shape: tuple = None,
num_outputs: int = 1,
activation: Union[str, list] = None,
dropout: Union[float, list] = 0.,
mode: str = "regression",
output_activation: str = None,
**kwargs
):
"""
helper function to make LSTM layers for pytorch
"""
assert isinstance(input_shape, (tuple, list))
assert len(input_shape)==2
if dropout is None:
dropout = 0.
layers = {
'LSTM_0': {"config": dict(
input_size=input_shape[1],
hidden_size= units,
batch_first= True,
num_layers= num_layers,
dropout=dropout,
**kwargs
),
"outputs": ['lstm_output', 'states_0']
},
'slice': {"config": lambda x: x[:, -1, :], # we want to get the output from last lookback step.
"inputs": "lstm_output"},
}
layers = _make_output_layer(
layers,
mode,
units,
num_outputs,
output_activation
)
return {"layers": layers}
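# Hedged usage sketch for the LSTM helper (10 lookback steps, 3 input features):
# >>> cfg = LSTM(units=64, num_layers=1, input_shape=(10, 3), num_outputs=1)
# >>> list(cfg["layers"])                            # roughly ['LSTM_0', 'slice', 'Linear_out']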
def CNN(
filters: Union[int, list] = None,
out_channels = 32,
kernel_size: Union[int, tuple, list] = 3,
convolution_type: str = "1D",
num_layers: int = 1,
padding: Union[str, list] = "same",
stride: Union[int, list] = 1,
pooling_type: Union[str, list] = None,
pool_size: Union[int, list] = 2,
batch_normalization: Union[bool, list] = None,
activation: Union[str, list] = None,
dropout: Union[float, list] = None,
input_shape: tuple = None,
num_outputs: int = 1,
mode: str = "regression",
output_activation: str = None,
**kwargs,
):
"""
helper function to make CNN model with pytorch as backend
"""
out_channels = _check_length(out_channels, num_layers)
#activation = _check_length(activation, num_layers)
padding = _check_length(padding, num_layers)
stride = _check_length(stride, num_layers)
pooling_type = _check_length(pooling_type, num_layers)
pool_size = _check_length(pool_size, num_layers)
kernel_size = _check_length(kernel_size, num_layers)
batch_normalization = _check_length(batch_normalization, num_layers)
dropout = _check_length(dropout, num_layers)
assert isinstance(input_shape, (tuple, list))
assert len(input_shape)==2
in_feat = input_shape[-1]
layers = {}
for idx in range(num_layers):
    # the idx-th convolution maps in_feat channels to out_channels[idx] channels
    out_feat = out_channels[idx]
    pool_type = pooling_type[idx]
    batch_norm = batch_normalization[idx]
    config = {
        "kernel_size": kernel_size[idx],
        "stride": stride[idx],
        "padding": padding[idx],
        "in_channels": in_feat,
        "out_channels": out_feat
    }
    if kwargs:
        config.update(kwargs)
    layers.update({f"Conv1d_{idx}": config})
    in_feat = out_feat
    if pool_type:
        pool_lyr = f"{pool_type}{convolution_type}"
        _lyr = {pool_lyr: {"pool_size": pool_size[idx]}}
        layers.update(_lyr)
    if batch_norm:
        layers.update({"BatchNormalization": {}})
    if dropout[idx]:
        layers.update({f'Dropout_{idx}': dropout[idx]})
layers = _make_output_layer(
layers,
mode,
out_feat,
num_outputs,
output_activation
)
return {"layers": layers}
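# Hedged usage sketch for the CNN helper (24 time steps, 5 channels):
# >>> cfg = CNN(out_channels=32, kernel_size=3, input_shape=(24, 5), num_outputs=1)
# >>> list(cfg["layers"])                            # roughly ['Conv1d_0', 'Linear_out']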
def _make_output_layer(
layers,
mode,
in_features,
num_outputs,
output_activation
):
if output_activation is None and mode == "classification":
# for binary it is better to use sigmoid
if num_outputs > 2:
output_activation = "softmax"
else:
output_activation = "sigmoid"
num_outputs = 1
layers.update(
{"Linear_out": {"in_features": in_features, 'out_features': num_outputs}})
return layers | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_torch/_functions.py | _functions.py |
from typing import Union
import gc
from SeqMetrics import RegressionMetrics
from ai4water.backend import easy_mpl as ep
from ai4water.backend import os, np, plt, torch, pd
try:
import wandb
except ModuleNotFoundError:
wandb = None
# only so that docs can be built without having torch to be installed
try:
from .utils import to_torch_dataset, TorchMetrics
except ModuleNotFoundError:
to_torch_dataset, TorchMetrics = None, None
if torch is not None:
from .pytorch_attributes import LOSSES
else:
LOSSES = {}
from ai4water.utils.utils import dateandtime_now, find_best_weight
F = {
'mse': [np.nanmin, np.less],
'nse': [np.nanmax, np.greater],
'r2': [np.nanmax, np.greater],
'pbias': [np.nanmin, np.less],
'mape': [np.nanmin, np.less],
'rmse': [np.nanmin, np.less],
'nrmse': [np.nanmin, np.less],
'kge': [np.nanmax, np.greater],
}
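# Hedged sketch of how F is used: each metric maps to (best-value selector, improvement test).
# >>> f_best, f_better = F['rmse']                   # np.nanmin, np.less -> lower rmse is better
# >>> f_better(0.2, f_best(np.array([0.5, 0.3])))    # True, 0.2 improves on the best rmse so far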
class AttributeContainer(object):
def __init__(self, num_epochs, to_monitor=None, use_cuda=None,
path=None, verbosity=1):
self.to_monitor = get_metrics_to_monitor(to_monitor)
self.num_epochs = num_epochs
self.epoch = 0
self.val_loader = None
self.train_loader = None
self.criterion = None
self.optimizer = None
self.val_epoch_losses = {}
self.train_epoch_losses = None
self.train_metrics = {metric: np.full(num_epochs, np.nan) for metric in self.to_monitor}
self.val_metrics = {f'val_{metric}': np.full(num_epochs, np.nan) for metric in self.to_monitor}
self.best_epoch = 0 # todo,
self.use_cuda = use_cuda if use_cuda is not None else torch.cuda.is_available()
self.verbosity = verbosity
def_path = path if path is not None else os.path.join(os.getcwd(), 'results', dateandtime_now())
if not os.path.exists(def_path) and verbosity >= 0:
if not os.path.isdir(def_path):
os.makedirs(def_path)
else:
os.mkdir(def_path)
self.path = def_path
@property
def use_cuda(self):
return self._use_cuda
@use_cuda.setter
def use_cuda(self, x):
self._use_cuda = x
@property
def optimizer(self):
return self._optimizer
@optimizer.setter
def optimizer(self, x):
self._optimizer = x
@property
def loss(self):
return self._loss
@loss.setter
def loss(self, x):
if isinstance(x, str):
x = LOSSES[x.upper()]()
self._loss = x
@property
def path(self):
return self._path
@path.setter
def path(self, x):
self._path = x
def _device(self):
if self.use_cuda:
return torch.device("cuda")
else:
return torch.device("cpu")
class Learner(AttributeContainer):
"""Trains the pytorch model. Motivated from fastai"""
def __init__(
self,
model, # torch.nn.Module,
batch_size: int = 32,
num_epochs: int = 14,
patience: int = 100,
shuffle: bool = True,
to_monitor: list = None,
use_cuda:bool = False,
path: str = None,
wandb_config:dict = None,
verbosity=1,
**kwargs
):
"""
Initializes the Learner class
Arguments:
model : a pytorch model having following attributes and methods
- num_outs
- w_path
- `loss`
- `get_optimizer`
batch_size : batch size
num_epochs : Number of epochs for which to train the model
patience : how many epochs to wait before stopping the training in
case `to_monitor` does not improve.
shuffle :
use_cuda : whether to use cuda or not
to_monitor : list of metrics to monitor
path : path to save results/weights
wandb_config : config for wandb
Example
-------
>>> from torch import nn
>>> import torch
>>> from ai4water.models._torch import Learner
...
>>> class Net(nn.Module):
>>> def __init__(self, D_in, H, D_out):
... super(Net, self).__init__()
... # hidden layer
... self.linear1 = nn.Linear(D_in, H)
... self.linear2 = nn.Linear(H, D_out)
>>> def forward(self, x):
... l1 = self.linear1(x)
... a1 = torch.sigmoid(l1)
... yhat = torch.sigmoid(self.linear2(a1))
... return yhat
...
>>> learner = Learner(model=Net(1, 2, 1),
... num_epochs=501,
... patience=50,
... batch_size=1,
... shuffle=False)
...
>>> learner.optimizer = torch.optim.SGD(learner.model.parameters(), lr=0.1)
>>> def criterion_cross(labels, outputs):
... out = -1 * torch.mean(labels * torch.log(outputs) + (1 - labels) * torch.log(1 - outputs))
... return out
>>> learner.loss = criterion_cross
...
>>> X = torch.arange(-20, 20, 1).view(-1, 1).type(torch.FloatTensor)
>>> Y = torch.zeros(X.shape[0])
>>> Y[(X[:, 0] > -4) & (X[:, 0] < 4)] = 1.0
...
>>> learner.fit(X, Y)
>>> metrics = learner.evaluate(X, y=Y, metrics=['r2', 'nse', 'mape'])
>>> t = learner.predict(X, y=Y, name='training')
"""
super().__init__(num_epochs, to_monitor, path=path,
use_cuda=use_cuda,
verbosity=verbosity)
if self.use_cuda:
model = model.to(self._device())
self.model = model
self.batch_size = batch_size
self.shuffle = shuffle
self.patience = patience
self.wandb_config = wandb_config
def fit(
self,
x,
y=None,
validation_data=None,
**kwargs
):
"""Runs the training loop for pytorch model.
Arguments
---------
x :
Can be one of following
- an instance of torch.Dataset, y will be ignored
- an instance of torch.DataLoader, y will be ignored
- a torch tensor containing input data for each example
- a numpy array or pandas DataFrame
- a list of torch tensors or numpy arrays
y :
if `x` is torch tensor, then `y` is the label/target for
each corresponding example.
validation_data :
can be one of following:
- an instance of torch.Dataset
- an instance of torch.DataLoader
- a tuple of x,y pairs where x and y are tensors
Default is None, which means no validation is performed.
kwargs :
can be `callbacks` For example to use a callable
as callback use following
>>> callbacks = [{'after_epochs': 300, 'func': PlotStuff}]
where `PlotStuff` is a callable.
Each `callable` is provided with following keyword arguments
- epoch : the current epoch at which callable is called.
- model : the model
- train_data : training data_loader
- val_data : validation data_loader
"""
self.on_train_begin(x, y=y, validation_data=validation_data, **kwargs)
for epoch in range(self.num_epochs):
self.epoch = epoch
self.train_for_epoch()
self.validate_for_epoch()
self.on_epoch_end()
if epoch - self.best_epoch > self.patience:
if self.verbosity > 0:
print(f"Stopping early because improvement in loss did not happen since {self.best_epoch}th epoch")
break
return self.on_train_end()
def predict(
self,
x,
y=None,
batch_size: int = None,
reg_plot: bool = True,
name: str = None,
**kwargs
) -> np.ndarray:
"""Makes prediction on the given data
Arguments:
x : data on which to evaluate. It can be
- a torch.utils.data.Dataset
- a torch.utils.data.DataLoader
- a torch.Tensor
- a numpy array
- a list of torch tensors numpy arrays
y : only relevant if `x` is a torch.Tensor. It comprises labels for
the corresponding x.
batch_size : None means make prediction on whole data in one go
reg_plot : whether to plot regression line or not
name : string to be used for title and name of saved plot
Returns:
predicted output as numpy array
"""
true, pred = self._eval(x=x, y=y, batch_size=batch_size)
if y is not None and reg_plot and pred.size > 0.0:
ep.regplot(true, pred, show=False)
plt.savefig(os.path.join(self.path, f'{name}_regplot.png'))
#if self.use_cuda:
torch.cuda.empty_cache()
gc.collect()
return pred
def _eval(self, x, y=None, batch_size=None):
loader, _ = self._get_loader(x=x, y=y, batch_size=batch_size, shuffle=False)
true, pred = [], []
for i, (batch_x, batch_y) in enumerate(loader):
batch_y, pred_y = self.eval(batch_x, batch_y)
true.append(batch_y.detach().cpu().numpy())
pred.append(pred_y.detach().cpu().numpy())
true = np.concatenate(true)
pred = np.concatenate(pred)
del loader
del batch_x
del batch_y
gc.collect()
return true, pred
def eval(self, batch_x, batch_y):
"""Calls the model with x and y data and returns trues and preds"""
batch_x = batch_x if isinstance(batch_x, list) else [batch_x]
batch_x = [tensor.float() for tensor in batch_x]
if self.use_cuda:
batch_x = [tensor.cuda() for tensor in batch_x]
batch_y = batch_y.cuda()
pred_y = self.model(*batch_x)
del batch_x
return batch_y, pred_y
def evaluate(
self,
x,
y,
batch_size: int = None,
metrics: Union[str, list] = 'r2',
**kwargs
):
"""
Evaluates the `model` on the given data.
Arguments:
x : data on which to evaluate. It can be
- a torch.utils.data.Dataset
- a torch.utils.data.DataLoader
- a torch.Tensor
- a numpy.ndarray
- a list of torch tensors numpy arrays
y : It comprises labels for
the corresponding x.
batch_size : None means make prediction on whole data in one go
metrics : name of performance metric to measure. It can be a single metric
or a list of metrics. Allowed metrics are anyone from
`ai4water.post_processing.SeqMetrics.RegressionMetrics`
kwargs :
Returns:
if metrics is string the returned value is float otherwise
it will be a dictionary
"""
# todo y->pred is only converting tensor into numpy array
true, pred = self._eval(x=x, y=y, batch_size=batch_size)
evaluator = RegressionMetrics(true, pred)
errors = {}
if isinstance(metrics, str):
errors = getattr(evaluator, metrics)()
else:
assert isinstance(metrics, list)
for m in metrics:
errors[m] = getattr(evaluator, m)()
return errors
def train_for_epoch(self):
"""Trains pytorch model for one complete epoch"""
epoch_losses = {metric: np.full(len(self.train_loader), np.nan) for metric in self.to_monitor}
# todo, it would be better to avoid reshaping/view at all
if hasattr(self.model, 'num_outs'):
num_outs = self.model.num_outs
else:
num_outs = self.num_outs
for i, (batch_x, batch_y) in enumerate(self.train_loader):
# todo, feeding batch_y to eval is only putting it on right device
# can we do it before?
batch_y, pred_y = self.eval(batch_x, batch_y)
if num_outs:
batch_y = batch_y.float().view(len(batch_y), num_outs)
pred_y = pred_y.view(len(pred_y), num_outs)
loss = self.criterion(batch_y, pred_y)
loss = loss.float()
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
# calculate metrics for each mini-batch
er = TorchMetrics(batch_y, pred_y)
for k, v in epoch_losses.items():
v[i] = getattr(er, k)().detach().item()
# epoch_losses['mse'][i] = loss.detach()
# take the mean for all mini-batches without considering infinite values
self.train_epoch_losses = {k: round(float(np.mean(v[np.isfinite(v)])), 4) for k, v in epoch_losses.items()}
if self.wandb_config is not None:
wandb.log(self.train_epoch_losses, step=self.epoch)
if self.use_cuda:
torch.cuda.empty_cache()
return
def validate_for_epoch(self):
"""If validation data is available, then it performs the validation """
if self.val_loader is not None:
epoch_losses = {metric: np.full(len(self.val_loader), np.nan) for metric in self.to_monitor}
for i, (batch_x, batch_y) in enumerate(self.val_loader):
batch_y, pred_y = self.eval(batch_x, batch_y)
# calculate metrics for each mini-batch
er = TorchMetrics(batch_y, pred_y)
for k, v in epoch_losses.items():
v[i] = getattr(er, k)().detach().item()
# take the mean for all mini-batches
self.val_epoch_losses = {f'val_{k}': round(float(np.mean(v)), 4) for k, v in epoch_losses.items()}
if self.wandb_config is not None:
wandb.log(self.val_epoch_losses, step=self.epoch)
for k, v in self.val_epoch_losses.items():
metric = k.split('_')[1]
f1 = F[metric][0]
f2 = F[metric][1]
if f2(v, f1(self.val_metrics[k])):
torch.save(self.model.state_dict(), self._weight_fname(self.epoch, v))
self.best_epoch = self.epoch
break # weights are saved for this epoch so no need to check other metrics
return
def _weight_fname(self, epoch, loss):
return os.path.join(getattr(self.model, 'w_path', self.path), f"weights_{epoch}_{loss}")
def _get_train_val_loaders(self, x, y=None, validation_data=None):
train_loader, self.num_outs = self._get_loader(x=x,
y=y,
batch_size=self.batch_size,
shuffle=self.shuffle)
val_loader, _ = self._get_loader(x=validation_data,
batch_size=self.batch_size,
shuffle=self.shuffle)
return train_loader, val_loader
def on_train_begin(self, x, y=None, validation_data=None, **kwargs):
self.cbs = kwargs.get('callbacks', []) # no callback by default
if self.verbosity > 0:
print("{}{}{}".format('*' * 25, 'Training Started', '*' * 25))
formatter = "{:<7}" + " {:<15}" * (len(self.train_metrics) + len(self.val_metrics))
print(formatter.format('Epoch: ',
*self.train_metrics.keys(),
*self.val_metrics.keys()))
print("{}".format('*' * 70))
if hasattr(self.model, 'loss'):
self.criterion = self.model.loss()
else:
self.criterion = self.loss
if hasattr(self.model, 'get_optimizer'):
self.optimizer = self.model.get_optimizer()
else:
self.optimizer = self.optimizer
self.train_loader, self.val_loader = self._get_train_val_loaders(
x,
y=y,
validation_data=validation_data)
if self.wandb_config is not None:
assert wandb is not None
assert isinstance(self.wandb_config, dict)
wandb.init(name=os.path.basename(self.path),
project=self.wandb_config.get('project', 'test_project'),
notes='This is Learner from AI4Water test run',
tags=['ai4water', 'pytorch'],
entity=self.wandb_config.get('entity', ''))
return
def on_train_end(self):
self.update_weights()
self.train_metrics['loss'] = self.train_metrics.pop('mse')
self.val_metrics['val_loss'] = self.val_metrics.pop('val_mse')
class History(object):
history = {}
history.update(self.train_metrics)
history.update(self.val_metrics)
setattr(self, 'history', History())
if self.wandb_config is not None:
wandb.finish()
return History()
def update_weights(self, weight_file_path: str = None):
"""If `weight_file_path` is not given then it finds the best weights
and updates the model with best wieghts.
Arguments:
weight_file_path : complete path of weights which are to be loaded
"""
if weight_file_path:
assert os.path.exists(weight_file_path)
best_weights = os.path.basename(weight_file_path)
else:
w_path = getattr(self.model, 'w_path', self.path)
best_weights = find_best_weight(w_path, epoch_identifier=self.best_epoch)
if best_weights is not None:
if best_weights.endswith(".hdf5"): # todo, find_best_weight should not add .hdf5
best_weights = best_weights.split(".hdf5")[0]
weight_file_path = os.path.join(w_path, best_weights)
if best_weights is not None:
# fpath = os.path.splitext(weight_file_path)[0] # we are not saving the whole model but only state_dict
self.model.load_state_dict(torch.load(weight_file_path))
if self.verbosity > 0:
print("{} Successfully loaded weights from {} file {}".format('*' * 10, best_weights, '*' * 10))
return
def update_metrics(self):
for k, v in self.train_metrics.items():
v[self.epoch] = self.train_epoch_losses[k]
if self.val_loader is not None:
for k, v in self.val_metrics.items():
v[self.epoch] = self.val_epoch_losses[k]
return
def on_epoch_begin(self):
return
def on_epoch_end(self):
formatter = "{:<7}" + "{:<15.7f} " * (len(self.val_epoch_losses) + len(self.train_epoch_losses))
if self.val_loader is None: # otherwise model is already saved based upon validation performance
for k, v in self.train_epoch_losses.items():
f1 = F[k][0]
f2 = F[k][1]
if f2(v, f1(self.train_metrics[k])):
torch.save(self.model.state_dict(), self._weight_fname(self.epoch, v))
self.best_epoch = self.epoch
break
if self.verbosity > 0:
print(formatter.format(self.epoch, *self.train_epoch_losses.values(), *self.val_epoch_losses.values()))
for cb in self.cbs:
if self.epoch % cb['after_epochs'] == 0:
cb['func'](epoch=self.epoch,
model=self.model,
train_data=self.train_loader,
val_data=self.val_loader
)
self.update_metrics()
return
def _get_loader(self, x, y=None, batch_size=None, shuffle=True):
data_loader = None
num_outs = None
if x is None:
return None, None
if isinstance(x, list):
if len(x) == 1:
x = x[0]
if isinstance(x, torch.utils.data.Dataset):
dataset = x
else:
dataset = to_torch_dataset(x, y)
else:
dataset = to_torch_dataset(x, y)
elif isinstance(x, (np.ndarray, pd.DataFrame)):
if y is not None:
# if x is numpy array or DataFrame, so should y
assert isinstance(y, (np.ndarray, pd.DataFrame, pd.Series))
# if it is DataFrame or Series
if hasattr(y, 'values'):
y = y.values
if len(y.shape) == 1:
num_outs = 1
else:
num_outs = y.shape[-1]
if isinstance(x, pd.DataFrame):
x = x.values
dataset = to_torch_dataset(x, y)
elif isinstance(x, torch.utils.data.Dataset):
dataset = x
elif isinstance(x, torch.utils.data.DataLoader):
data_loader = x
elif isinstance(x, torch.Tensor):
dataset = to_torch_dataset(x=x, y=y)
elif isinstance(x, tuple): # x is tuple of x,y pairs
assert len(x) == 2
dataset = to_torch_dataset(x=x[0], y=x[1])
else:
raise NotImplementedError(f'unrecognized data of type {x.__class__.__name__} given')
if data_loader is None:
if batch_size is None:
batch_size = len(dataset)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle
)
return data_loader, num_outs
def plot_model_using_tensorboard(
self,
x=None,
path='tensorboard/tensorboard'
):
"""Plots the neural network on tensorboard
Arguments
---------
x : torch.Tensor
input to the model
path : str
path to save tensorboard graph
"""
from torch.utils.tensorboard import SummaryWriter
# default `log_dir` is "runs" - we'll be more specific here
writer = SummaryWriter(path)
if x is None:
x, _ = next(iter(self.train_loader))
writer.add_graph(self.model, x)
writer.close()
return
def plot_model(self, y=None):
"""Helper function to plot dot diagram of model using torchviz module.
Arguments
---------
y : torch.Tensor
output tensor
"""
try:
from torchviz import make_dot
except ModuleNotFoundError:
print("You must install torchviz to plot model."
"see https://github.com/szagoruyko/pytorchviz#installation for installation")
return
if y is None:
x, _ = next(iter(self.train_loader))
y = self.model(x)
fname = os.path.join(self.path, 'model.png')
dot = make_dot(y, dict(self.model.named_parameters()),
show_attrs=True,
show_saved=True)
dot.render(fname)
return dot
def get_metrics_to_monitor(metrics):
if metrics is None:
_metrics = ['mse']
elif isinstance(metrics, list):
_metrics = metrics + ['mse']
else:
assert isinstance(metrics, str)
_metrics = ['mse', metrics]
return list(set(_metrics)) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/models/_torch/pytorch_training.py | pytorch_training.py |
import gc
import json
import math
import warnings
from typing import Union, Tuple, List, Callable, Optional
from SeqMetrics import RegressionMetrics, ClassificationMetrics
from ai4water.backend import tf, os, np, pd, plt, easy_mpl, sklearn
from ai4water.backend import xgboost, catboost, lightgbm
from ai4water.hyperopt import HyperOpt
from ai4water.preprocessing import DataSet
from ai4water.utils.utils import make_model
from ai4water.utils.utils import TrainTestSplit
from ai4water.utils.utils import jsonize, ERROR_LABELS
from ai4water.utils.utils import AttribtueSetter
from ai4water.postprocessing import ProcessPredictions
from ai4water.utils.visualizations import edf_plot
from ai4water.utils.utils import create_subplots
from ai4water.utils.utils import find_best_weight, dateandtime_now, dict_to_file
from ai4water.functional import Model as FModel
from ai4water._main import BaseModel
plot = easy_mpl.plot
bar_chart = easy_mpl.bar_chart
taylor_plot = easy_mpl.taylor_plot
dumbbell_plot = easy_mpl.dumbbell_plot
reg_plot = easy_mpl.regplot
if tf is not None:
if 230 <= int(''.join(tf.__version__.split('.')[0:2]).ljust(3, '0')) < 250:
from ai4water.functional import Model
print(f"""
Switching to functional API due to tensorflow version {tf.__version__}
for experiments""")
else:
from ai4water import Model
else:
from ai4water import Model
SEP = os.sep
# todo plots comparing different models in following youtube videos at 6:30 and 8:00 minutes.
# https://www.youtube.com/watch?v=QrJlj0VCHys
# compare models using statistical tests such as Giacomini-White test or Diebold-Mariano test
# paired ttest 5x2cv
# in order to unify the use of metrics
Metrics = {
'regression': lambda t, p, multiclass=False, **kwargs: RegressionMetrics(
t, p, **kwargs),
'classification': lambda t, p, multiclass=False, **kwargs: ClassificationMetrics(
t, p, multiclass=multiclass, **kwargs)
}
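# Hedged usage sketch of the unified metric interface above:
# >>> t, p = np.array([1.0, 2.0, 3.0]), np.array([1.1, 1.9, 3.2])
# >>> Metrics['regression'](t, p).r2()               # any SeqMetrics method can be called this way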
Monitor = {
'regression': ['r2', 'corr_coeff', 'mse', 'rmse', 'r2_score',
'nse', 'kge', 'mape', 'pbias', 'bias', 'mae', 'nrmse',
'mase'],
'classification': ['accuracy', 'precision', 'recall', 'f1_score']
}
reg_dts = ["ExtraTreeRegressor","DecisionTreeRegressor",
"ExtraTreesRegressor", "RandomForestRegressor",
"AdaBoostRegressor", "BaggingRegressor",
"HistGradientBoostingRegressor", "GradientBoostingRegressor"]
cls_dts = ["DecisionTreeClassifier", "ExtraTreeClassifier",
"ExtraTreesClassifier", "AdaBoostClassifier","RandomForestClassifier", "BaggingClassifier"
"GradientBoostingClassifier", "HistGradientBoostingClassifier"]
if xgboost is not None:
reg_dts += ["XGBRegressor"]
cls_dts += ["XGBClassifier"]
if catboost is not None:
reg_dts += ["CatBoostRegressor"]
cls_dts += ["CatBoostClassifier"]
if lightgbm is not None:
reg_dts += ["LGBMRegressor"]
cls_dts += ["LGBMClassifier"]
DTs = {"regression":
reg_dts,
"classification":
cls_dts
}
LMs = {
"regression":
["LinearRegression", "Ridge", "RidgeCV", "SGDRegressor",
"ElasticNetCV", "ElasticNet",
"Lasso", "LassoCV", "Lars", "LarsCV", "LassoLars", "LassoLarsCV", "LassoLarsIC"],
"classification":
["LogisticRegression", "LogisticRegressionCV", "PassiveAggressiveClassifier", "Perceptron",
"RidgeClassifier", "RidgeClassifierCV", "SGDClassifier", "SGDClassifierCV"]
}
class Experiments(object):
"""
Base class for all the experiments.
All the experiments must be subclasses of this class.
The core idea of ``Experiments`` is based upon ``model``. An experiment
consists of one or more models. The models differ from each other in their
structure/idea/concept/configuration. When :py:meth:`ai4water.experiments.Experiments.fit`
is called, each ``model`` is built and trained. The user can customize, building
and training process by subclassing this class and customizing
:py:meth:`ai4water.experiments.Experiments._build` and
:py:meth:`ai4water.experiments.Experiments._fit` methods.
Attributes
------------
- metrics
- exp_path
- model_
- models
Methods
--------
- fit
- taylor_plot
- loss_comparison
- plot_convergence
- from_config
- compare_errors
- plot_improvement
- compare_convergence
- plot_cv_scores
- fit_with_tpot
"""
def __init__(
self,
cases: dict = None,
exp_name: str = None,
num_samples: int = 5,
verbosity: int = 1,
monitor: Union[str, list, Callable] = None,
show: bool = True,
save: bool = True,
**model_kws,
):
"""
Arguments
---------
cases :
python dictionary defining different cases/scenarios. See TransformationExperiments
for use case.
exp_name :
name of experiment, used to define path in which results are saved
num_samples :
only relevant when you want to optimize hyperparameters of models
using the ``grid`` method
verbosity : bool, optional
determines the amount of information
monitor : str, list, optional
list of performance metrics to monitor. It can be any performance
metric from the SeqMetrics_ library.
By default ``r2``, ``corr_coeff``, ``mse``, ``rmse``, ``r2_score``,
``nse``, ``kge``, ``mape``, ``pbias``, ``bias``, ``mae``, ``nrmse``
``mase`` are considered for regression and ``accuracy``, ``precision``
``recall`` are considered for classification. The user can also provide a
custom metric to monitor. In such a case it should be a callable which
accepts two input arguments. The first one is the array of true values and
the second is the array of predicted values.
>>> def f1_score(t,p)->float:
>>> return ClassificationMetrics(t, p).f1_score(average="macro")
>>> monitor = [f1_score, "accuracy"]
Here ``f1_score`` is a function which accepts two arrays.
**model_kws :
keyword arguments which are to be passed to `Model`
and are not optimized.
.. _SeqMetrics:
https://seqmetrics.readthedocs.io/en/latest/index.html
"""
self.opt_results = None
self.optimizer = None
self.exp_name = 'Experiments_' + str(dateandtime_now()) if exp_name is None else exp_name
self.num_samples = num_samples
self.verbosity = verbosity
self.show = show
self.save = save
self.models = [method for method in dir(self) if method.startswith('model_')]
if cases is None:
cases = {}
self.cases = {'model_' + key if not key.startswith('model_') else key: val for key, val in cases.items()}
self.models = self.models + list(self.cases.keys())
self.exp_path = os.path.join(os.getcwd(), "results", self.exp_name)
if not os.path.exists(self.exp_path):
os.makedirs(self.exp_path)
self.eval_models = {}
self.optimized_models = {}
if monitor is None:
self.monitor = Monitor[self.mode]
else:
if not isinstance(monitor, list):
monitor = [monitor]
self.monitor = monitor
self.model_kws = model_kws
# _run_type is actually set during call to .fit
self._run_type = None
@property
def category(self)->str:
raise NotImplementedError
@property
def plots_(self)->list:
if self.mode == "regression":
return ['regression', 'prediction', "residual", "edf"]
return []
def metric_kws(self, metric_name:str=None)->dict:
return {}
def _pre_build_hook(self, **suggested_paras):
"""Anything that needs to be performed before building the model."""
return suggested_paras
def config(self)->dict:
_config = {
"models":self.models,
"exp_path": self.exp_path,
"exp_name": self.exp_name,
"cases": self.cases,
"model_kws": jsonize(self.model_kws),
"eval_models": self.eval_models,
"optimized_models": self.optimized_models,
}
for attr in ['is_multiclass_', 'is_binary_', 'is_multilabel_', 'considered_models_']:
if hasattr(self, attr):
_config[attr] = getattr(self, attr)
return _config
def save_config(self):
dict_to_file(self.exp_path, config=self.config())
return
def build_from_config(self, config_path:str)->BaseModel:
assert os.path.exists(config_path), f"{config_path} does not exist"
if self.category == "DL":
model = FModel
else:
model = Model
model = model.from_config_file(config_path=config_path)
assert isinstance(model, BaseModel)
setattr(self, 'model_', model)
return model
def update_model_weight(
self,
model:Model,
config_path:str
):
"""updates the weight of model.
If no saved weight is found, a warning is raised.
"""
best_weights = find_best_weight(os.path.join(config_path, "weights"))
# Sometimes no weight is saved when the prediction is None,
# todo
# should we raise error in such case? What are other cases when best_weights can be None
if best_weights is None:
warnings.warn(f"Can't find weight for {model} from \n {config_path}")
return
weight_file = os.path.join(model.w_path, best_weights)
model.update_weights(weight_file=weight_file)
if self.verbosity>1:
print("{} Successfully loaded weights from {} file {}".format('*' * 10, weight_file, '*' * 10))
return
@property
def tpot_estimator(self):
raise NotImplementedError
@property
def mode(self):
raise NotImplementedError
@property
def num_samples(self):
return self._num_samples
@num_samples.setter
def num_samples(self, x):
self._num_samples = x
def _reset(self):
self.cv_scores_ = {}
self.metrics = {}
self.features = {}
self.iter_metrics = {}
self.considered_models_ = []
return
def _named_x0(self)->dict:
x0 = getattr(self, 'x0', None)
param_space = getattr(self, 'param_space', None)
if param_space:
names = [s.name for s in param_space]
if x0:
return {k: v for k, v in zip(names, x0)}
return {}
def _get_config(self, model_type, model_name, **suggested_paras):
# the config must contain the suggested parameters by the hpo algorithm
if model_type in self.cases:
config = self.cases[model_type]
config.update(suggested_paras)
elif model_name in self.cases:
config = self.cases[model_name]
config.update(suggested_paras)
elif hasattr(self, model_type):
config = getattr(self, model_type)(**suggested_paras)
else:
raise TypeError
return config
def _dry_run_a_model(
self,
model_type,
model_name,
cross_validate,
train_x, train_y,
val_x, val_y):
"""runs the `.fit` of allt he models being considered once.
Also populates following attributes
- eval_models
- cv_scores_
- metrics
- features
"""
if self.verbosity >= 0: print(f"running {model_name} model")
config = self._get_config(model_type, model_name, **self._named_x0())
model = self._build_fit(
train_x, train_y,
title=f"{self.exp_name}{SEP}{model_name}",
validation_data=(val_x, val_y),
cross_validate=cross_validate,
**config)
train_results = self._predict(model=model, x=train_x, y=train_y)
self._populate_results(model_name, train_results)
if val_x is not None and (hasattr(val_x, '__len__') and len(val_x)>0):
val_results = self._predict(model=model, x=val_x, y=val_y)
self._populate_results(model_name, train_results=train_results, val_results=val_results)
if cross_validate:
cv_scoring = model.val_metric
self.cv_scores_[model_type] = getattr(model, f'cross_val_scores')
setattr(self, '_cv_scoring', cv_scoring)
self.eval_models[model_type] = self.model_.path
return
def _optimize_a_model(
self,
model_type,
model_name,opt_method,
num_iterations,
cross_validate,
post_optimize,
train_x, train_y,
val_x, val_y,
**hpo_kws,
):
def objective_fn(**suggested_paras) -> float:
config = self._get_config(model_type, model_name, **suggested_paras)
return self._build_fit_eval(
train_x=train_x,
train_y=train_y,
validation_data=(val_x, val_y),
cross_validate=cross_validate,
title=f"{self.exp_name}{SEP}{model_name}",
**config)
opt_dir = os.path.join(os.getcwd(),
f"results{SEP}{self.exp_name}{SEP}{model_name}")
if self.verbosity > 0:
print(f"optimizing {model_name} using {opt_method} method")
self.optimizer = HyperOpt(
opt_method,
objective_fn=objective_fn,
param_space=self.param_space,
opt_path=opt_dir,
num_iterations=num_iterations, # number of iterations
x0=self.x0,
verbosity=self.verbosity,
**hpo_kws
)
self.opt_results = self.optimizer.fit()
self.optimized_models[model_type] = self.optimizer.opt_path
if cross_validate:
# if we do train_best, self.model_ will change and this
cv_scoring = self.model_.val_metric
self.cv_scores_[model_type] = getattr(self.model_, f'cross_val_scores')
setattr(self, '_cv_scoring', cv_scoring)
x, y = _combine_training_validation_data(train_x, train_y, (val_x, val_y))
if post_optimize == 'eval_best':
train_results = self.eval_best(x, y, model_name, opt_dir)
else:
train_results = self.train_best(x, y, model_name)
self._populate_results(model_type, train_results)
if not hasattr(self, 'model_'): # todo asking user to define this parameter is not good
raise ValueError(f'The `build` method must set a class level attribute named `model_`.')
self.eval_models[model_type] = self.model_.path
self.iter_metrics[model_type] = self.model_iter_metric
return
def fit(
self,
x=None,
y=None,
data=None,
validation_data: Optional[tuple] = None,
run_type: str = "dry_run",
opt_method: str = "bayes",
num_iterations: int = 12,
include: Union[None, list, str] = None,
exclude: Union[None, list, str] = '',
cross_validate: bool = False,
post_optimize: str = 'eval_best',
**hpo_kws
):
"""
Runs the fit loop for all the ``models`` of experiment. The user can
however, specify the models by making use of ``include`` and ``exclude``
keywords.
The data should be defined according to following four rules
either
- only x,y should be given (val will be taken from it according to splitting schemes)
- or x,y and validation_data should be given
- or only data should be given (train and validation data will be
taken according to splitting schemes)
Parameters
----------
x :
input data. When ``run_type`` is ``dry_run``, then each model is trained
on this data. If ``run_type`` is ``optimize`` and validation_data is not given,
then x,y pairs of validation data are extracted from this data based
upon the splitting scheme i.e. the ``val_fraction`` argument.
y :
label/true/observed data
data :
Raw unprepared data from which x,y pairs for training and validation
will be extracted.
This will be passed to :py:meth:`ai4water.Model.fit`.
This is only required if ``x`` and ``y`` are not given
validation_data :
a tuple which consists of x,y pairs for validation data. This can only
be given if ``x`` and ``y`` are given and ``data`` is not given.
run_type : str, optional (default="dry_run")
One of ``dry_run`` or ``optimize``. If ``dry_run``, then all
the `models` will be trained only once. if ``optimize``, then
hyperparameters of all the models will be optimized.
opt_method : str, optional (default="bayes")
which optimization method to use. options are ``bayes``,
``random``, ``grid``. Only valid if ``run_type`` is ``optimize``
num_iterations : int, optional
number of iterations for optimization. Only valid
if ``run_type`` is ``optimize``.
include : list/str, optional (default=None)
names of models to include. If None, all the available models
will be trained and/or optimized. If "DTs", all decision tree
based models are used; if "LMs", all linear models are used.
exclude :
name of ``models`` to be excluded
cross_validate : bool, optional (default=False)
whether to cross validate the model or not. This
depends upon the `cross_validator` argument of the `Model`.
post_optimize : str, optional
one of ``eval_best`` or ``train_best``. If ``eval_best``,
the weights of the best model will be loaded again and the model
will be evaluated on train, test and all the data. If ``train_best``,
then a new model will be built and trained using the parameters of
the best model.
**hpo_kws :
keyword arguments for :py:class:`ai4water.hyperopt.HyperOpt` class.
Examples
---------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> exp = MLRegressionExperiments()
>>> exp.fit(data=busan_beach())
If you want to compare only RandomForest, XGBRegressor, CatBoostRegressor
and LGBMRegressor, use the ``include`` keyword
>>> exp.fit(data=busan_beach(), include=['RandomForestRegressor', 'XGBRegressor',
>>> 'CatBoostRegressor', 'LGBMRegressor'])
Similarly, if you want to exclude certain models from comparison, you can
use ``exclude`` keyword
>>> exp.fit(data=busan_beach(), exclude=["SGDRegressor"])
if you want to perform cross validation for each model, you must give
the ``cross_validator`` argument which will be passed to the ai4water Model
>>> exp = MLRegressionExperiments(cross_validator={"KFold": {"n_splits": 10}})
>>> exp.fit(data=busan_beach(), cross_validate=True)
Setting ``cross_validate`` to True will populate `cv_scores_` dictionary
which can be accessed as ``exp.cv_scores_``
if you want to optimize the hyperparameters of each model,
>>> exp.fit(data=busan_beach(), run_type="optimize", num_iterations=20)
"""
train_x, train_y, val_x, val_y, _, _ = self.verify_data(
x, y, data, validation_data)
AttribtueSetter(self, train_y)
del x, y, data, validation_data
gc.collect()
assert run_type in ['optimize', 'dry_run'], f"run_type must be either 'dry_run' or 'optimize' but it is {run_type}"
self._run_type = run_type
assert post_optimize in ['eval_best', 'train_best'], f"""
post_optimize must be either 'eval_best' or 'train_best' but it is {post_optimize}"""
if exclude == '':
exclude = []
if hpo_kws is None:
hpo_kws = {}
models_to_consider = self._check_include_arg(include)
if exclude is None:
exclude = []
elif isinstance(exclude, str):
exclude = [exclude]
consider_exclude(exclude, self.models, models_to_consider)
self._reset()
setattr(self, 'considered_models_', models_to_consider)
for model_type in models_to_consider:
model_name = model_type.split('model_')[1]
self.model_iter_metric = {}
self.iter_ = 0
# there may be attributes in the model which need to be loaded, so run the method first.
# such as param_space etc.
if hasattr(self, model_type):
getattr(self, model_type)()
if run_type == 'dry_run':
self._dry_run_a_model(
model_type,
model_name,
cross_validate,
train_x, train_y, val_x, val_y)
else:
self._optimize_a_model(
model_type,
model_name,
opt_method,
num_iterations,
cross_validate,
post_optimize,
train_x, train_y,
val_x, val_y,
**hpo_kws
)
self.save_config()
save_json_file(os.path.join(self.exp_path, 'features.json'), self.features)
save_json_file(os.path.join(self.exp_path, 'metrics.json'), self.metrics)
return
def eval_best(
self,
x,
y,
model_type:str,
opt_dir:str,
):
"""Evaluate the best models."""
folders = [path for path in os.listdir(opt_dir) if os.path.isdir(os.path.join(opt_dir, path)) and path.startswith('1_')]
if len(folders) < 1:
return self.train_best(x, y, model_type)
assert len(folders) == 1, f"{folders}"
for mod_path in folders:
config_path = os.path.join(opt_dir, mod_path, "config.json")
model = self.build_from_config(config_path)
self.update_model_weight(model, os.path.join(opt_dir, mod_path))
results = self._predict(model, x=x, y=y)
return results
def train_best(
self,
x,
y,
model_type,
):
"""Finds the best model, builts it, fits it and makes predictions from it."""
best_paras = self.optimizer.best_paras()
if best_paras.get('lookback', 1) > 1:
_model = 'layers'
else:
_model = model_type
title = f"{self.exp_name}{SEP}{model_type}{SEP}best"
model = self._build_fit(x, y,
view=False,
title=title,
cross_validate=False,
model={_model: self.optimizer.best_paras()},
)
results = self._predict(model, x=x, y=y)
return results
def _populate_results(
self,
model_type: str,
train_results: Tuple[np.ndarray, np.ndarray],
val_results: Tuple[np.ndarray, np.ndarray] = None,
test_results: Tuple[np.ndarray, np.ndarray] = None,
):
"""populates self.metrics and self.features dictionaries"""
if not model_type.startswith('model_'): # internally we always use model_ at the start.
model_type = f'model_{model_type}'
metrics = dict()
features = dict()
# save performance metrics of train and test
if train_results is not None:
metrics['train'] = self._get_metrics(*train_results)
features['train'] = {
'true': {'std': np.std(train_results[0])},
'simulation': {'std': np.std(train_results[1])}
}
if val_results is not None:
metrics['val'] = self._get_metrics(*val_results)
features['val'] = {
'true': {'std': np.std(val_results[0])},
'simulation': {'std': np.std(val_results[1])}
}
if test_results is not None:
self.metrics[model_type]['test'] = self._get_metrics(*test_results)
self.features[model_type]['test'] = {
'true': {'std': np.std(test_results[0])},
'simulation': {'std': np.std(test_results[1])}
}
if metrics:
self.metrics[model_type] = metrics
self.features[model_type] = features
return
def _get_metrics(self, true:np.ndarray, predicted:np.ndarray)->dict:
"""get the performance metrics being monitored given true and predicted data"""
metrics_inst = Metrics[self.mode](true, predicted,
replace_nan=True,
replace_inf=True,
multiclass=self.is_multiclass_)
metrics = {}
for metric in self.monitor:
if isinstance(metric, str):
metrics[metric] = getattr(metrics_inst, metric)(**self.metric_kws(metric))
elif callable(metric):
# metric is a callable
metrics[metric.__name__] = metric(true, predicted)
else:
raise ValueError(f"invalid metric f{metric}")
return metrics
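# Illustrative sketch (not part of the original docstrings): entries of
# ``monitor`` may be metric names known to the Metrics classes or plain
# callables; a callable is evaluated as ``metric(true, predicted)`` and stored
# under its ``__name__``. Assuming the experiment class forwards ``monitor``
# to this base class, usage could look like:
# >>> def mean_bias(true, predicted):
# ...     return float((predicted - true).mean())
# >>> exp = MLRegressionExperiments(monitor=["r2", "rmse", mean_bias])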
def taylor_plot(
self,
x=None,
y=None,
data=None,
include: Union[None, list] = None,
exclude: Union[None, list] = None,
figsize: tuple = (5, 8),
**kwargs
) -> plt.Figure:
"""
Compares the models using taylor_plot_.
Parameters
----------
x :
input data, if not given, then ``data`` must be given.
y :
target data
data :
raw unprocessed data from which x,y pairs can be drawn. This data
will be passed to DataSet class and :py:meth:`ai4water.preprocessing.DataSet.test_data`
method will be used to draw x,y pairs.
include : str, list, optional
if not None, must be a list of models which will be included.
None will result in plotting all the models.
exclude : str, list, optional
if not None, must be a list of models which will excluded.
None will result in no exclusion
figsize : tuple, optional
figure size as (width,height)
**kwargs :
all the keyword arguments for taylor_plot_ function.
Returns
-------
plt.Figure
Example
-------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> inputs = list(data.columns)[0:-1]
>>> outputs = list(data.columns)[-1]
>>> experiment = MLRegressionExperiments(input_features=inputs, output_features=outputs)
>>> experiment.fit(data=data)
>>> experiment.taylor_plot(data=data)
.. _taylor_plot:
https://easy-mpl.readthedocs.io/en/latest/plots.html#easy_mpl.taylor_plot
"""
_, _, _, _, x, y = self.verify_data(data=data, test_data=(x, y))
self._build_predict_from_configs(x, y)
metrics = self.metrics.copy()
include = self._check_include_arg(include)
if exclude is not None:
consider_exclude(exclude, self.models, metrics)
if 'name' in kwargs:
fname = kwargs.pop('name')
else:
fname = 'taylor'
fname = os.path.join(os.getcwd(), f'results{SEP}{self.exp_name}{SEP}{fname}.png')
train_std = [_model['train']['true']['std'] for _model in self.features.values()]
train_std = list(set(train_std))[0]
if 'test' in list(self.features.values())[0]:
test_stds = [_model['test']['true']['std'] for _model in self.features.values()]
test_data_type = "test"
else:
test_stds = [_model['val']['true']['std'] for _model in self.features.values()]
test_data_type = "val"
# if the first value in list(set(test_stds)) is nan, use the second one
if np.isnan(list(set(test_stds)))[0]:
test_std = list(set(test_stds))[1]
else:
test_std = list(set(test_stds))[0]
assert not np.isnan(test_std)
observations = {'train': {'std': train_std},
test_data_type: {'std': test_std}}
simulations = {'train': None, test_data_type: None}
for scen in ['train', test_data_type]:
scen_stats = {}
for model, _metrics in metrics.items():
model_stats = {'std': self.features[model][scen]['simulation']['std'],
'corr_coeff': _metrics[scen]['corr_coeff'],
'pbias': _metrics[scen]['pbias']
}
if model in include:
key = shred_model_name(model)
scen_stats[key] = model_stats
simulations[scen] = scen_stats
ax = taylor_plot(
observations=observations,
simulations=simulations,
figsize=figsize,
show=False,
**kwargs
)
if self.save:
plt.savefig(fname, dpi=600, bbox_inches="tight")
if self.show:
plt.show()
return ax
def _consider_include(self, include: Union[str, list], to_filter:dict):
filtered = {}
include = self._check_include_arg(include)
for m in include:
if m in to_filter:
filtered[m] = to_filter[m]
return filtered
def _check_include_arg(
self,
include:Union[str, List[str]],
default=None,
)->list:
"""
if include is None, then self.models is returned.
"""
if default is None:
default = self.models
if isinstance(include, str):
if include == "DTs":
include = DTs[self.mode]
elif include == "LMs":
include = LMs[self.mode]
else:
include = [include]
if include is None:
include = default
include = ['model_' + _model if not _model.startswith('model_') else _model for _model in include]
# make sure that include contains same elements which are present in models
for elem in include:
assert elem in self.models, f"""
{elem} given in `include` is not available.
Available cases are {self.models} and you wanted to include
{include}
"""
return include
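# Usage sketch of the normalisation above (illustrative only):
# >>> exp.fit(data=data, include="DTs")            # all decision-tree based models
# >>> exp.fit(data=data, include="LGBMRegressor")  # a single model
# A bare name such as "LGBMRegressor" is rewritten to "model_LGBMRegressor"
# before being validated against ``self.models``.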
def plot_improvement(
self,
metric_name: str,
plot_type: str = 'dumbbell',
lower_limit: Union[int, float] = -1.0,
upper_limit: Union[int, float] = None,
name: str = '',
**kwargs
) -> pd.DataFrame:
"""
Shows how much improvement was observed after hyperparameter
optimization. This plot is only available if ``run_type`` was set to
`optimize` in :py:meth:`ai4water.experiments.Experiments.fit`.
Arguments
---------
metric_name :
the performance metric for comparison
plot_type : str, optional
the kind of plot to draw. Either ``dumbbell`` or ``bar``
lower_limit : float/int, optional (default=-1.0)
clip the values below this value. Set this value to None to avoid clipping.
upper_limit : float/int, optional (default=None)
clip the values above this value
name : str, optional
name of file to save the figure
**kwargs :
any additional keyword arguments for
`dumbell plot <https://easy-mpl.readthedocs.io/en/latest/plots.html#easy_mpl.dumbbell_plot>`_
or `bar_chart <https://easy-mpl.readthedocs.io/en/latest/plots.html#easy_mpl.bar_chart>`_
Returns
-------
pd.DataFrame
Examples
--------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> experiment = MLRegressionExperiments()
>>> experiment.fit(data=busan_beach(), run_type="optimize", num_iterations=30)
>>> experiment.plot_improvement('r2')
...
>>> # or draw a bar chart
...
>>> experiment.plot_improvement('r2', plot_type='bar')
"""
assert self._run_type == "optimize", f"""
when the run_type argument during .fit() is {self._run_type}, the
improvement plot is not available"""
data: str = 'test'
assert data in ['training', 'test', 'validation']
improvement = pd.DataFrame(columns=['start', 'end'])
for model, model_iter_metrics in self.iter_metrics.items():
initial = model_iter_metrics[0][metric_name]
final = self.metrics[model]['test'][metric_name]
key = shred_model_name(model)
improvement.loc[key] = [initial, final]
baseline = improvement['start']
if lower_limit is not None:
baseline = np.where(baseline < lower_limit, lower_limit, baseline)
if upper_limit is not None:
baseline = np.where(baseline > upper_limit, upper_limit, baseline)
improvement['start'] = baseline
if plot_type == "dumbbell":
dumbbell_plot(
improvement['start'],
improvement['end'],
improvement.index.tolist(),
ax_kws=dict(xlabel=ERROR_LABELS.get(metric_name, metric_name)),
show=False,
**kwargs
)
#ax.set_xlabel(ERROR_LABELS.get(metric_name, metric_name))
else:
colors = {
'start': np.array([0, 56, 104]) / 256,
'end': np.array([126, 154, 178]) / 256
}
order = ['start', 'end']
if metric_name in ['r2', 'nse', 'kge', 'corr_coeff', 'r2_mod', 'r2_score']:
order = ['end', 'start']
fig, ax = plt.subplots()
for ordr in order:
bar_chart(improvement[ordr], improvement.index.tolist(),
ax=ax, color=colors[ordr], show=False,
ax_kws={'xlabel':ERROR_LABELS.get(metric_name, metric_name),
'label':ordr}, **kwargs)
ax.legend()
plt.title('Improvement after Optimization')
if self.save:
fname = os.path.join(
os.getcwd(),
f'results{SEP}{self.exp_name}{SEP}{name}_improvement_{metric_name}.png')
plt.savefig(fname, dpi=300, bbox_inches=kwargs.get('bbox_inches', 'tight'))
if self.show:
plt.show()
return improvement
def compare_errors(
self,
matric_name: str,
x=None,
y=None,
data = None,
cutoff_val: float = None,
cutoff_type: str = None,
sort_by: str = 'test',
ignore_nans: bool = True,
colors = None,
cmaps = None,
figsize:tuple = None,
**kwargs
) -> pd.DataFrame:
"""
Plots a specific performance matric for all the models which were
run during :py:meth:`ai4water.experiments.Experiments.fit` call.
Parameters
----------
matric_name : str
performance metric whose value to plot for all the models
x :
input data, if not given, then ``data`` must be given.
y :
target data
data :
raw unprocessed data from which x,y pairs can be drawn. This data
will be passed to :py:meth:`ai4water.preprocessing.DataSet` class and
:py:meth:`ai4water.preprocessing.DataSet.test_data` method
will be used to draw x,y pairs.
cutoff_val : float
if provided, only those models will be plotted for which the
metric is greater/smaller than this value. This works in conjunction
with `cutoff_type`.
cutoff_type : str
one of ``greater``, ``greater_equal``, ``less`` or ``less_equal``.
Criteria to apply ``cutoff_val``. For example, to show only those
models whose $R^2$ is > 0.5, set cutoff_val=0.5 and cutoff_type='greater'.
sort_by:
either ``test`` or ``train``. How to sort the results for plotting.
If 'test', then test performance metrics will be sorted otherwise
train performance metrics will be sorted.
ignore_nans:
default True, if True, then performance metrics with nans are ignored
otherwise nans/empty bars will be shown to depict which models have
resulted in nans for the given performance metric.
colors :
color for bar chart. To assign separate colors for both bar charts, provide
a list of two.
cmaps :
color map for bar chart. To assign separate cmap for both bar charts, provide
a list of two.
figsize : tuple
figure size as (width, height)
**kwargs :
any keyword argument that goes to `easy_mpl.bar_chart`
returns
-------
pd.DataFrame
pandas dataframe whose index is models and which has two columns
named 'train' and 'test'. These columns contain the performance
metrics of the models.
Example
-------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> inputs = list(data.columns)[0:-1]
>>> outputs = list(data.columns)[-1]
>>> experiment = MLRegressionExperiments(input_features=inputs, output_features=outputs)
>>> experiment.fit(data=data)
>>> experiment.compare_errors('mse', data=data)
>>> experiment.compare_errors('r2', data=data, cutoff_val=0.2, cutoff_type='greater')
"""
_, _, _, _, x, y = self.verify_data(data=data, test_data=(x, y))
# populate self.metrics dictionary
self._build_predict_from_configs(x, y)
models = self.sort_models_by_metric(matric_name, cutoff_val, cutoff_type,
ignore_nans, sort_by)
plt.close('all')
fig, axis = plt.subplots(1, 2, sharey='all', figsize=figsize)
labels = [model.split('model_')[1] for model in models.index.tolist()]
models.index = labels
if kwargs is not None:
for arg in ['ax', 'labels', 'values', 'show', 'sort', 'ax_kws']:
assert arg not in kwargs, f"{arg} not allowed in kwargs"
color1, color2 = None, None
if colors is not None:
if hasattr(colors, '__len__') and len(colors)==2:
color1, color2 = colors
else:
color1 = colors
color2 = colors
cmap1, cmap2 = None, None
if cmaps is not None:
if hasattr(cmaps, '__len__') and len(cmaps)==2:
cmap1, cmap2 = cmaps
else:
cmap1 = cmaps
cmap2 = cmaps
bar_chart(ax=axis[0],
labels=labels,
color=color1,
cmap=cmap1,
values=models['train'],
ax_kws={'title':"Train",
'xlabel':ERROR_LABELS.get(matric_name, matric_name)},
show=False,
**kwargs,
)
bar_chart(ax=axis[1],
labels=labels,
values=models.iloc[:, 1],
color=color2,
cmap=cmap2,
ax_kws={'title': models.columns.tolist()[1],
'xlabel':ERROR_LABELS.get(matric_name, matric_name),
'show_yaxis':False},
show=False,
**kwargs
)
appendix = f"{cutoff_val or ''}{cutoff_type or ''}{len(models)}"
if self.save:
fname = os.path.join(
os.getcwd(),
f'results{SEP}{self.exp_name}{SEP}ErrorComprison_{matric_name}_{appendix}.png')
plt.savefig(fname, dpi=100, bbox_inches='tight')
if self.show:
plt.show()
return models
def loss_comparison(
self,
loss_name: str = 'loss',
include: list = None,
figsize: int = None,
start: int = 0,
end: int = None,
**kwargs
) -> plt.Axes:
"""
Plots the loss curves of the evaluated models. This method is only available
if the models being compared are deep learning models.
Parameters
----------
loss_name : str, optional
the name of loss value, must be recorded during training
include:
name of models to include
figsize : tuple
size of the figure
start : int
index of the first epoch to plot
end : int
index at which to stop plotting the loss curve
**kwargs :
any other keyword arguments to be passed to the
`plot <https://easy-mpl.readthedocs.io/en/latest/plots.html#easy_mpl.plot>`_
Returns
-------
matplotlib axes
Example
-------
>>> from ai4water.experiments import DLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> exp = DLRegressionExperiments(
>>> input_features = data.columns.tolist()[0:-1],
>>> output_features = data.columns.tolist()[-1:],
>>> epochs=300,
>>> train_fraction=1.0,
>>> y_transformation="log",
>>> x_transformation="minmax",
>>> )
>>> exp.fit(data=data)
>>> exp.loss_comparison()
you may wish to plot on log scale
>>> exp.loss_comparison(ax_kws={'logy':True})
"""
include = self._check_include_arg(include, self.considered_models_)
if self.model_.category == "ML":
raise NotImplementedError(f"Non neural network models can not have loss comparison")
loss_curves = {}
for _model, _path in self.eval_models.items():
if _model in include:
df = pd.read_csv(os.path.join(_path, 'losses.csv'), usecols=[loss_name])
loss_curves[_model] = df.values
end = end or len(df)
ax_kws = {
'xlabel': "Epochs",
'ylabel': 'Loss'}
if len(loss_curves) > 5:
ax_kws['legend_kws'] = {'bbox_to_anchor': (1.1, 0.99)}
_kws = {'linestyle': '-'}
if kwargs is not None:
if 'ax_kws' in kwargs:
ax_kws.update(kwargs.pop('ax_kws'))
_kws.update(kwargs)
_, axis = plt.subplots(figsize=figsize)
for _model, _loss in loss_curves.items():
label = shred_model_name(_model)
plot(_loss[start:end], ax=axis, label=label, show=False, ax_kws=ax_kws, **_kws)
axis.grid(ls='--', color='lightgrey')
if self.save:
fname = os.path.join(self.exp_path, f'loss_comparison_{loss_name}.png')
plt.savefig(fname, dpi=100, bbox_inches='tight')
if self.show:
plt.show()
return axis
def compare_convergence(
self,
name: str = 'convergence_comparison',
**kwargs
) -> Union[plt.Axes, None]:
"""
Plots and compares the convergence plots of hyperparameter optimization runs.
Only valid if `run_type=optimize` during :py:meth:`ai4water.experiments.Experiments.fit`
call.
Parameters
----------
name : str
name of file to save the plot
kwargs :
keyword arguments to plot_ function
Returns
-------
if the optimized models are >1 then it returns the matplotlib axes
on which the figure is drawn otherwise it returns None.
Examples
--------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> experiment = MLRegressionExperiments()
>>> experiment.fit(data=busan_beach(), run_type="optimize", num_iterations=30)
>>> experiment.compare_convergence()
.. _plot:
https://easy-mpl.readthedocs.io/en/latest/plots.html#easy_mpl.plot
"""
if len(self.optimized_models) < 1:
print('No model was optimized')
return
plt.close('all')
fig, axis = plt.subplots()
for _model, opt_path in self.optimized_models.items():
with open(os.path.join(opt_path, 'iterations.json'), 'r') as fp:
iterations = json.load(fp)
convergence = sort_array(list(iterations.keys()))
label = shred_model_name(_model)
_kws = dict(
linestyle='--',
ax_kws = dict(xlabel='Number of iterations $n$',
ylabel=r"$\min f(x)$ after $n$ calls",
label=label)
)
if kwargs is not None:
_kws.update(kwargs)
plot(
convergence,
ax=axis,
show=False,
**_kws
)
if self.save:
fname = os.path.join(self.exp_path, f'{name}.png')
plt.savefig(fname, dpi=100, bbox_inches='tight')
if self.show:
plt.show()
return axis
def _load_model(self, model_name:str):
"""
builds the model from config and then updates the weights
and returns it
"""
m_path = self._get_best_model_path(model_name)
c_path = os.path.join(m_path, 'config.json')
model = self.build_from_config(c_path)
# load the saved weights of the best model into it
self.update_model_weight(model, m_path)
return model
def compare_edf_plots(
self,
x=None,
y=None,
data=None,
exclude:Union[list, str] = None,
figsize=None,
fname: Optional[str] = "edf",
**kwargs
):
"""compare EDF plots of all the models which have been fitted.
This plot is only available for regression problems.
parameters
----------
x :
input data
y :
target data
data :
raw unprocessed data from which x,y pairs of the test data are drawn
exclude : list
name of models to exclude from plotting
figsize :
figure size as (width, height)
fname : str, optional
name of the file to save plot
**kwargs
any keyword arguments for :py:meth:`ai4water.utils.utils.edf_plot`
Returns
-------
plt.Figure
matplotlib
Example
-------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> dataset = busan_beach()
>>> inputs = list(dataset.columns)[0:-1]
>>> outputs = list(dataset.columns)[-1]
>>> experiment = MLRegressionExperiments(input_features=inputs, output_features=outputs)
>>> experiment.fit(data=dataset, include="LMs")
>>> experiment.compare_edf_plots(data=dataset, exclude="SGDRegressor")
"""
assert self.mode == "regression", f"This plot is not available for {self.mode} mode"
_, _, _, _, x, y = self.verify_data(data=data, test_data=(x, y))
model_folders = self._get_model_folders()
if exclude is None:
exclude = []
elif isinstance(exclude, str):
exclude = [exclude]
fig, axes = plt.subplots(figsize=figsize)
# load all models from config
for model_name in model_folders:
if model_name not in exclude:
model = self._load_model(model_name)
true, prediction = model.predict(x, y, return_true=True,
process_results=False)
assert len(true) == true.size
assert len(prediction) == prediction.size
error = np.abs(true.reshape(-1,) - prediction.reshape(-1,))
if model_name.endswith("Regressor"):
label = model_name.split("Regressor")[0]
elif model_name.endswith("Classifier"):
label = model_name.split("Classifier")[0]
else:
label = model_name
edf_plot(error, xlabel="Absolute Error", ax=axes, label=label,
show=False, **kwargs)
axes.grid(ls='--', color='lightgrey')
if len(model_folders)>7:
axes.legend(loc=(1.05, 0.0))
if self.save:
fname = os.path.join(self.exp_path, f'{fname}.png')
plt.savefig(fname, dpi=600, bbox_inches='tight')
if self.show:
plt.show()
return fig
def compare_regression_plots(
self,
x=None,
y=None,
data=None,
include: Union[None, list] = None,
figsize: tuple=None,
fname: Optional[str] = "regression",
**kwargs
)->plt.Figure:
"""compare regression plots of all the models which have been fitted.
This plot is only available for regression problems.
parameters
----------
x :
input data
y :
target data
data :
raw unprocessed data from which x,y pairs of the test data are drawn
include : str, list, optional
if not None, must be a list of models which will be included.
None will result in plotting all the models.
figsize :
figure size as (width, height)
fname : str, optional
name of the file to save the plot
**kwargs
any keyword arguments for :obj:`easy_mpl.reg_plot`
Returns
-------
plt.Figure
matplotlib
Example
-------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> dataset = busan_beach()
>>> inputs = list(dataset.columns)[0:-1]
>>> outputs = list(dataset.columns)[-1]
>>> experiment = MLRegressionExperiments(input_features=inputs, output_features=outputs)
>>> experiment.fit(data=dataset)
>>> experiment.compare_regression_plots(data=dataset)
"""
assert self.mode == "regression", f"""
This plot is not available for {self.mode} mode"""
_, _, _, _, x, y = self.verify_data(data=data, test_data=(x, y))
model_folders = self._get_model_folders()
include = self._check_include_arg(include, self.considered_models_)
fig, axes = create_subplots(naxes=len(include),
figsize=figsize, sharex="all")
if not isinstance(axes, np.ndarray):
axes = np.array(axes)
# load all models from config
for model_name, ax in zip(include, axes.flat):
model_name = model_name.split('model_')[1]
model = self._load_model(model_name)
true, prediction = model.predict(x, y, return_true=True,
process_results=False)
if np.isnan(prediction).sum() == prediction.size:
if self.verbosity>=0:
print(f"Model {model_name} only predicted nans")
continue
reg_plot(true, prediction, marker_size=5, ax=ax, show=False,
**kwargs)
ax.set_xlabel('')
ax.set_ylabel('')
if model_name.endswith("Regressor"):
label = model_name.split("Regressor")[0]
elif model_name.endswith("Classifier"):
label = model_name.split("Classifier")[0]
else:
label = model_name
ax.legend(labels=[label],
fontsize=9,
numpoints=2,
fancybox=False,
framealpha=0.0)
if hasattr(fig, "supxlabel"):
fig.supxlabel("Observed", fontsize=14)
fig.supylabel("Predicted", fontsize=14)
if self.save:
fname = os.path.join(self.exp_path, f'{fname}.png')
plt.savefig(fname, dpi=600, bbox_inches='tight')
if self.show:
plt.show()
return fig
def compare_residual_plots(
self,
x=None,
y=None,
data = None,
include: Union[None, list] = None,
figsize: tuple = None,
fname: Optional[str] = "residual"
)->plt.Figure:
"""compare residual plots of all the models which have been fitted.
This plot is only available for regression problems.
parameters
----------
x :
input data
y :
target data
data :
raw unprocessed data from which test x,y pairs are drawn using the
:py:class:`ai4water.preprocessing.DataSet` class. Only valid if x and y are not given.
include : str, list, optional
if not None, must be a list of models which will be included.
None will result in plotting all the models.
figsize : tuple
figure size as (width, height)
fname : str, optional
name of file to save the plot
Returns
-------
plt.Figure
matplotlib
Example
-------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> dataset = busan_beach()
>>> inputs = list(dataset.columns)[0:-1]
>>> outputs = list(dataset.columns)[-1]
>>> experiment = MLRegressionExperiments(input_features=inputs, output_features=outputs)
>>> experiment.fit(data=dataset)
>>> experiment.compare_residual_plots(data=dataset)
"""
assert self.mode == "regression", f"This plot is not available for {self.mode} mode"
include = self._check_include_arg(include, self.considered_models_)
_, _, _, _, x, y = self.verify_data(data=data, test_data=(x, y))
model_folders = self._get_model_folders()
fig, axes = create_subplots(naxes=len(include), figsize=figsize)
if not isinstance(axes, np.ndarray):
axes = np.array(axes)
# load all models from config
for model_and_name, ax in zip(include, axes.flat):
model_name = model_and_name.split('model_')[1]
model = self._load_model(model_name)
true, prediction = model.predict(x, y, return_true=True, process_results=False)
plot(
prediction,
true - prediction,
'o',
show=False,
ax=ax,
color="darksalmon",
markerfacecolor=np.array([225, 121, 144]) / 256.0,
markeredgecolor="black",
markeredgewidth=0.15,
markersize=1.5,
)
# draw horizontal line on y=0
ax.axhline(0.0)
if model_name.endswith("Regressor"):
label = model_name.split("Regressor")[0]
elif model_name.endswith("Classifier"):
label = model_name.split("Classifier")[0]
else:
label = model_name
ax.legend(labels=[label], fontsize=9,
numpoints=2,
fancybox=False, framealpha=0.0)
if hasattr(fig, "supxlabel"):
fig.supxlabel("Prediction")
fig.supylabel("Residual")
if self.save:
fname = os.path.join(self.exp_path, f'{fname}.png')
plt.savefig(fname, dpi=600, bbox_inches='tight')
if self.show:
plt.show()
return fig
@classmethod
def from_config(
cls,
config_path: str,
**kwargs
) -> "Experiments":
"""
Loads the experiment from the config file.
Arguments:
config_path : complete path of the experiment's json config file
kwargs : keyword arguments to experiment
Returns:
an instance of `Experiments` class
"""
if not config_path.endswith('.json'):
raise ValueError(f"""
{config_path} is not a json file
""")
with open(config_path, 'r') as fp:
config = json.load(fp)
cv_scores = {}
scoring = "mse"
for model_name, model_path in config['eval_models'].items():
with open(os.path.join(model_path, 'config.json'), 'r') as fp:
model_config = json.load(fp)
# if cross validation was performed, then read those results.
cross_validator = model_config['config']['cross_validator']
if cross_validator is not None:
cv_name = str(list(cross_validator.keys())[0])
scoring = model_config['config']['val_metric']
cv_fname = os.path.join(model_path, f'{cv_name}_{scoring}' + ".json")
if os.path.exists(cv_fname):
with open(cv_fname, 'r') as fp:
cv_scores[model_name] = json.load(fp)
exp = cls(exp_name=config['exp_name'], cases=config['cases'], **kwargs)
#exp.config = config
exp._from_config = True
# following four attributes are only available if .fit was run
exp.considered_models_ = config.get('considered_models_', [])
exp.is_binary_ = config.get('is_binary_', None)
exp.is_multiclass_ = config.get('is_multiclass_', None)
exp.is_multilabel_ = config.get('is_multilabel_', None)
exp.metrics = load_json_file(
os.path.join(os.path.dirname(config_path), "metrics.json"))
exp.features = load_json_file(
os.path.join(os.path.dirname(config_path), "features.json"))
exp.cv_scores_ = cv_scores
exp._cv_scoring = scoring
return exp
def plot_cv_scores(
self,
name: str = "cv_scores",
exclude: Union[str, list] = None,
include: Union[str, list] = None,
**kwargs
) -> Union[plt.Axes, None]:
"""
Plots the box whisker plots of the cross validation scores.
This plot is only available if cross_validation was set to True during
:py:meth:`ai4water.experiments.Experiments.fit`.
Arguments
---------
name : str
name of the file to save the plot
include : str/list
models to include
exclude : str/list
models to exclude
**kwargs : any of the following keyword arguments
- notch
- vert
- figsize
- bbox_inches
Returns
-------
matplotlib axes if the figure is drawn otherwise None
Example
-------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> exp = MLRegressionExperiments(cross_validator={"KFold": {"n_splits": 10}})
>>> exp.fit(data=busan_beach(), cross_validate=True)
>>> exp.plot_cv_scores()
"""
if len(self.cv_scores_) == 0:
return
scoring = self._cv_scoring
cv_scores = self.cv_scores_
consider_exclude(exclude, self.models, cv_scores)
cv_scores = self._consider_include(include, cv_scores)
model_names = [m.split('model_')[1] for m in list(cv_scores.keys())]
if len(model_names) < 5:
rotation = 0
else:
rotation = 90
plt.close()
_, axis = plt.subplots(figsize=kwargs.get('figsize', (8, 6)))
axis.boxplot(np.array(list(cv_scores.values())).squeeze().T,
notch=kwargs.get('notch', None),
vert=kwargs.get('vert', None),
labels=model_names
)
axis.set_xticklabels(model_names, rotation=rotation)
axis.set_xlabel("Models", fontsize=16)
axis.set_ylabel(ERROR_LABELS.get(scoring, scoring), fontsize=16)
fname = os.path.join(os.getcwd(),
f'results{SEP}{self.exp_name}{SEP}{name}_{len(model_names)}.png')
if self.save:
plt.savefig(fname, dpi=300, bbox_inches=kwargs.get('bbox_inches', 'tight'))
if self.show:
plt.show()
return axis
def _compare_cls_curves(
self, x, y, func, name,
figsize:tuple=None,
**kwargs
):
assert self.mode == "classification", f"""
{name} is only available for classification mode."""
model_folders = [p for p in os.listdir(self.exp_path) if os.path.isdir(os.path.join(self.exp_path, p))]
_, ax = plt.subplots(figsize=figsize)
# find all the model folders
m_paths = []
for m in model_folders:
if any(m in m_ for m_ in self.considered_models_):
m_paths.append(m)
nplots = 0
# load all models from config
for model_name in m_paths:
model = self._load_model(model_name)
kws = {'estimator': model,
'X': x,
'y': y.reshape(-1, ),
'ax': ax,
'name': model.model_name
}
if kwargs:
kws.update(kwargs)
if 'LinearSVC' in model.model_name:
# sklearn's LinearSVC does not have predict_proba,
# but the ai4water Model wrapper exposes such a method
# which would only raise an error here, so use the raw estimator
kws['estimator'] = model._model
if model.model_name in ['Perceptron', 'PassiveAggressiveClassifier',
'NearestCentroid', 'RidgeClassifier',
'RidgeClassifierCV']:
continue
if model.model_name in ['NuSVC', 'SVC']:
if not model._model.get_params()['probability']:
continue
if 'SGDClassifier' in model.model_name:
if model._model.get_params()['loss'] == 'hinge':
continue
func(**kws)
nplots += 1
ax.grid(ls='--', color='lightgrey')
if nplots>5:
plt.legend(bbox_to_anchor=(1.1, 0.99))
if self.save:
fname = os.path.join(self.exp_path, f"{name}.png")
plt.savefig(fname, dpi=300, bbox_inches='tight')
if self.show:
plt.show()
return ax
def compare_precision_recall_curves(
self,
x,
y,
figsize:tuple=None,
**kwargs
):
"""compares precision recall curves of the all the models.
parameters
----------
x :
input data
y :
labels for the input data
figsize : tuple
figure size
**kwargs :
any keyword arguments for :obj:matplotlib.plot function
Returns
-------
plt.Axes
matplotlib axes on which figure is drawn
Example
-------
>>> from ai4water.datasets import MtropicsLaos
>>> from ai4water.experiments import MLClassificationExperiments
>>> data = MtropicsLaos().make_classification(lookback_steps=1)
# define inputs and outputs
>>> inputs = data.columns.tolist()[0:-1]
>>> outputs = data.columns.tolist()[-1:]
# initiate the experiment
>>> exp = MLClassificationExperiments(
... input_features=inputs,
... output_features=outputs)
# run the experiment
>>> exp.fit(data=data, include=["model_LGBMClassifier",
... "model_XGBClassifier",
... "RandomForestClassifier"])
... # Compare Precision Recall curves
>>> exp.compare_precision_recall_curves(data[inputs].values, data[outputs].values)
"""
return self._compare_cls_curves(
x,
y,
name="precision_recall_curves",
func=sklearn.metrics.PrecisionRecallDisplay.from_estimator,
figsize=figsize,
**kwargs
)
def compare_roc_curves(
self,
x,
y,
figsize:tuple=None,
**kwargs
):
"""compares roc curves of the all the models.
parameters
----------
x :
input data
y :
labels for the input data
figsize : tuple
figure size
**kwargs :
any keyword arguments for :obj:matplotlib.plot function
Returns
-------
plt.Axes
matplotlib axes on which figure is drawn
Example
-------
>>> from ai4water.datasets import MtropicsLaos
>>> from ai4water.experiments import MLClassificationExperiments
>>> data = MtropicsLaos().make_classification(lookback_steps=1)
# define inputs and outputs
>>> inputs = data.columns.tolist()[0:-1]
>>> outputs = data.columns.tolist()[-1:]
# initiate the experiment
>>> exp = MLClassificationExperiments(
... input_features=inputs,
... output_features=outputs)
# run the experiment
>>> exp.fit(data=data, include=["model_LGBMClassifier",
... "model_XGBClassifier",
... "RandomForestClassifier"])
... # Compare ROC curves
>>> exp.compare_roc_curves(data[inputs].values, data[outputs].values)
"""
return self._compare_cls_curves(
x=x,
y=y,
name="roc_curves",
func=sklearn.metrics.RocCurveDisplay.from_estimator,
figsize=figsize,
**kwargs
)
def sort_models_by_metric(
self,
metric_name,
cutoff_val=None,
cutoff_type=None,
ignore_nans: bool = True,
sort_by="test"
) -> pd.DataFrame:
"""returns the models sorted according to their performance"""
idx = list(self.metrics.keys())
metrics = dict()
metrics['train'] = np.array([v['train'][metric_name] for v in self.metrics.values()])
if 'test' in list(self.metrics.values())[0]:
metrics['test'] = np.array([v['test'][metric_name] for v in self.metrics.values()])
else:
metrics['val'] = np.array([v['val'][metric_name] for v in self.metrics.values()])
if 'test' not in metrics and sort_by == "test":
sort_by = "val"
df = pd.DataFrame(metrics, index=idx)
if ignore_nans:
df = df.dropna()
df = df.sort_values(by=[sort_by], ascending=False)
if cutoff_type is not None:
assert cutoff_val is not None
if cutoff_type == "greater":
df = df.loc[df[sort_by] > cutoff_val]
else:
df = df.loc[df[sort_by] < cutoff_val]
return df
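# Illustrative sketch: the returned frame is indexed by model name and holds a
# 'train' column plus either 'test' or 'val', sorted in descending order, e.g.
# >>> exp.sort_models_by_metric('r2', cutoff_val=0.3, cutoff_type='greater')
# keeps only models whose test (or val) r2 exceeds 0.3; rows containing NaNs
# are dropped when ``ignore_nans`` is True.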
def fit_with_tpot(
self,
data,
models: Union[int, List[str], dict, str] = None,
selection_criteria: str = 'mse',
scoring: str = None,
**tpot_args
):
"""
Calls the fit method of tpot_ which
finds out the best pipeline for the given data.
Arguments
---------
data :
models :
It can be of three types.
- If list, it will be the names of machine learning models/
algorithms to consider.
- If integer, it will be the number of top
algorithms to consider for tpot. In such a case, you must have
first run the `.fit` method before running this method. If you run
tpot using all available models, it will take hours to days
for medium sized data (consisting of a few thousand examples). However,
if you first run .fit and see, for example, what the top 5 models are,
then you can set this argument to 5. In such a case, tpot will search
pipeline using only the top 5 algorithms/models that have been found
using .fit method.
- if dictionary, then the keys should be the names of algorithms/models
and values should be the parameters for each model/algorithm to be
optimized.
- You can also set it to ``all`` to consider all models available in
ai4water's Experiment module.
- default is None, which means the `tpot_config` argument will be None
selection_criteria :
The name of performance metric. If ``models`` is an integer, then
the models will be chosen according to this performance metric.
By default the models will be selected based upon their mse values
on test data.
scoring : the performance metric to use for finding the pipeline.
tpot_args :
any keyword argument for tpot's Regressor_ or Classifier_ class.
This can include arguments like ``generations``, ``population_size`` etc.
Returns
-------
the tpot object
Example
-------
>>> from ai4water.experiments import MLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> exp = MLRegressionExperiments(exp_name=f"tpot_reg_{dateandtime_now()}")
>>> exp.fit(data=busan_beach())
>>> tpot_regr = exp.fit_with_tpot(busan_beach(), 2, generations=1, population_size=2)
.. _tpot:
http://epistasislab.github.io/tpot/
.. _Regressor:
http://epistasislab.github.io/tpot/api/#regression
.. _Classifier:
http://epistasislab.github.io/tpot/api/#classification
"""
tpot_caller = self.tpot_estimator
assert tpot_caller is not None, f"tpot must be installed"
param_space = {}
tpot_config = None
for m in self.models:
getattr(self, m)()
ps = getattr(self, 'param_space')
path = getattr(self, 'path')
param_space[m] = {path: {p.name: p.grid for p in ps}}
if isinstance(models, int):
assert len(self.metrics) > 1, f"""
you must first run .fit() method in order to choose top {models} models"""
# sort the models w.r.t their performance
sorted_models = self.sort_models_by_metric(selection_criteria)
# get names of models
models = sorted_models.index.tolist()[0:models]
tpot_config = {}
for m in models:
c: dict = param_space[f"{m}"]
tpot_config.update(c)
elif isinstance(models, list):
tpot_config = {}
for m in models:
c: dict = param_space[f"model_{m}"]
tpot_config.update(c)
elif isinstance(models, dict):
tpot_config = {}
for mod_name, mod_paras in models.items():
if "." in mod_name:
mod_path = mod_name
else:
c: dict = param_space[f"model_{mod_name}"]
mod_path = list(c.keys())[0]
d = {mod_path: mod_paras}
tpot_config.update(d)
elif isinstance(models, str) and models == "all":
tpot_config = {}
for mod_name, mod_config in param_space.items():
mod_path = list(mod_config.keys())[0]
mod_paras = list(mod_config.values())[0]
tpot_config.update({mod_path: mod_paras})
fname = os.path.join(self.exp_path, "tpot_config.json")
with open(fname, 'w') as fp:
json.dump(jsonize(tpot_config), fp, indent=True)
tpot = tpot_caller(
verbosity=self.verbosity + 1,
scoring=scoring,
config_dict=tpot_config,
**tpot_args
)
not_allowed_args = ["cross_validator", "wandb_config", "val_metric",
"loss", "optimizer", "lr", "epochs", "quantiles", "patience"]
model_kws = self.model_kws
for arg in not_allowed_args:
if arg in model_kws:
model_kws.pop(arg)
dh = DataSet(data, **model_kws)
train_x, train_y = dh.training_data()
tpot.fit(train_x, train_y.reshape(-1, 1))
if "regressor" in self.tpot_estimator.__name__:
mode = "regression"
else:
mode = "classification"
visualizer = ProcessPredictions(path=self.exp_path,
show=bool(self.verbosity),
mode=mode)
for idx, data_name in enumerate(['training', 'test']):
x_data, y_data = getattr(dh, f"{data_name}_data")(key=str(idx))
pred = tpot.fitted_pipeline_.predict(x_data)
r2 = RegressionMetrics(y_data, pred).r2()
# todo, perform inverse transform and deindexification
visualizer(
pd.DataFrame(y_data.reshape(-1, )),
pd.DataFrame(pred.reshape(-1, )),
)
# save the python code of fitted pipeline
tpot.export(os.path.join(self.exp_path, "tpot_fitted_pipeline.py"))
# save each iteration
fname = os.path.join(self.exp_path, "evaluated_individuals.json")
with open(fname, 'w') as fp:
json.dump(tpot.evaluated_individuals_, fp, indent=True)
return tpot
def _build_fit(
self,
train_x=None,
train_y=None,
validation_data=None,
view=False,
title=None,
cross_validate: bool=False,
refit: bool=False,
**kwargs
)->Model:
model: Model = self._build(title=title, **kwargs)
self._fit(
model,
train_x=train_x,
train_y=train_y,
validation_data=validation_data,
cross_validate=cross_validate,
refit = refit,
)
if view:
self.model_.view()
return self.model_
def _build_fit_eval(
self,
train_x=None,
train_y=None,
validation_data:tuple=None,
view=False,
title=None,
cross_validate: bool=False,
refit: bool=False,
**kwargs
)->float:
"""
Builds and runs one 'model' of the experiment.
Since an experiment consists of many models, this method
is also run many times.
refit : bool
This means fit on training + validation data. This is true
when we have optimized the hyperparameters and now we would
like to fit on training + validation data as well.
"""
model = self._build_fit(train_x, train_y,
validation_data, view, title,
cross_validate, refit, **kwargs)
# return the validation score
return self._evaluate(model, *validation_data)
def _build(self, title=None, **suggested_paras):
"""Builds the ai4water Model class and makes it a class attribute."""
suggested_paras = self._pre_build_hook(**suggested_paras)
suggested_paras = jsonize(suggested_paras)
verbosity = max(self.verbosity - 1, 0)
if 'verbosity' in self.model_kws:
verbosity = self.model_kws.pop('verbosity')
if self.category == "DL":
model = FModel
else:
model = Model
model = model(
prefix=title,
verbosity=verbosity,
**self.model_kws,
**suggested_paras
)
setattr(self, 'model_', model)
return model
def _fit(
self,
model:Model,
train_x,
train_y,
validation_data,
cross_validate: bool = False,
refit: bool = False
):
"""Trains the model"""
if cross_validate:
return model.cross_val_score(*_combine_training_validation_data(
train_x,
train_y,
validation_data))
if refit:
# we need to combine training (x,y) + validation data.
return model.fit_on_all_training_data(*_combine_training_validation_data(
train_x,
train_y,
validation_data=validation_data))
if self.category == "DL":
model.fit(x=train_x, y=train_y, validation_data=validation_data)
else:
model.fit(x=train_x, y=train_y)
# model_ is used in the class for prediction so it must be the
# updated/trained model
self.model_ = model
return
def _evaluate(
self,
model:Model,
x,
y,
) -> float:
"""Evaluates the model"""
t, p = model.predict(
x=x, y=y,
return_true=True,
process_results=False)
test_metrics = self._get_metrics(t, p)
metrics = Metrics[self.mode](t, p,
remove_zero=True,
remove_neg=True,
replace_nan=True,
replace_inf=True,
multiclass=self.is_multiclass_)
self.model_iter_metric[self.iter_] = test_metrics
self.iter_ += 1
val_score_ = getattr(metrics, model.val_metric)()
val_score = val_score_
if model.val_metric in [
'r2', 'nse', 'kge', 'r2_mod', 'r2_adj', 'r2_score'
] or self.mode == "classification":
val_score = 1.0 - val_score_
if not math.isfinite(val_score):
val_score = 9999 # TODO, find a better way to handle this
print(f"val_score: {round(val_score, 5)} {model.val_metric}: {val_score_}")
return val_score
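# Worked example of the score flipping above (illustrative): if the model's
# ``val_metric`` is 'r2' and the computed r2 is 0.8, the value handed to the
# hyperparameter optimizer is 1.0 - 0.8 = 0.2, so a better model always maps
# to a smaller objective; error metrics such as 'mse' are returned unchanged.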
def _predict(
self,
model: Model,
x,
y
)->Tuple[np.ndarray, np.ndarray]:
"""
Makes predictions from the given model on the given x,y data.
It is assumed that the model has already been trained."""
true, predicted = model.predict(x, y, return_true=True, process_results=False)
if np.isnan(predicted).sum() == predicted.size:
warnings.warn(f"model {model.model_name} predicted only nans")
else:
ProcessPredictions(self.mode,
forecast_len=model.forecast_len,
path=model.path,
output_features=model.output_features,
plots=self.plots_,
show=bool(model.verbosity),
)(true, predicted)
return true, predicted
def verify_data(
self,
x=None,
y=None,
data=None,
validation_data: tuple = None,
test_data: tuple = None,
) -> tuple:
"""
verifies that either
- only x,y should be given (val will be taken from it according to splitting schemes)
- or x,y and validation_data should be given (means no test data)
- or x, y and validation_data and test_data are given
- or only data should be given (train, validation and test data will be
taken according to splitting schemes)
"""
def num_examples(samples):
if isinstance(samples, list):
assert len(set(len(sample) for sample in samples)) == 1
return len(samples[0])
return len(samples)
model_maker = make_model(**self.model_kws)
data_config = model_maker.data_config
if x is None:
assert y is None, f"y must only be given if x is given. x is {type(x)}"
if data is None:
# x,y and data are not given, we may be given test/validation data
train_x, train_y = None, None
if validation_data is None:
val_x, val_y = None, None
else:
val_x, val_y = validation_data
if test_data is None:
test_x, test_y = None, None
else:
test_x, test_y = test_data
else:
# case 4, only data is given
assert data is not None, f"if x is given, data must not be given"
assert validation_data is None, f"validation data must only be given if x is given"
#assert test_data is None, f"test data must only be given if x is given"
data_config.pop('category')
if 'lookback' in self._named_x0() and 'ts_args' not in self.model_kws:
# the value of lookback has been set by model_maker which can be wrong
# because the user expects it to be hyperparameter
data_config['ts_args']['lookback'] = self._named_x0()['lookback']
# when saving is done during initialization of DataSet and verbosity>0
# it prints information two times!
save = data_config.pop('save') or True
dataset = DataSet(data=data,
save=False,
category=self.category,
**data_config)
if save:
verbosity = dataset.verbosity
dataset.verbosity = 0
dataset.to_disk()
dataset.verbosity = verbosity
train_x, train_y = dataset.training_data()
val_x, val_y = dataset.validation_data() # todo what if there is not validation data
test_x, test_y = dataset.test_data()
if len(test_x) == 0:
test_x, test_y = None, None
elif test_data is None and validation_data is None:
# case 1, only x,y are given
assert num_examples(x) == num_examples(y)
splitter= TrainTestSplit(data_config['val_fraction'], seed=data_config['seed'] or 313)
if data_config['split_random']:
train_x, val_x, train_y, val_y = splitter.split_by_random(x, y)
else:
train_x, val_x, train_y, val_y = splitter.split_by_slicing(x, y)
test_x, test_y = None, None
elif test_data is None:
# case 2: x,y and validation_data should be given (means no test data)
assert num_examples(x) == num_examples(y)
train_x, train_y = x, y
val_x, val_y = validation_data
test_x, test_y = None, None
else:
# case 3
assert num_examples(x) == num_examples(y)
train_x, train_y = x, y
val_x, val_y = validation_data
test_x, test_y = test_data
return train_x, train_y, val_x, val_y, test_x, test_y
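# Minimal usage sketch of the accepted calling patterns (x, y, df, vx, vy,
# tx, ty are hypothetical arrays/DataFrames, not defined in this module):
# >>> exp.verify_data(data=df)                                   # case 4: raw data only
# >>> exp.verify_data(x, y)                                      # case 1: split internally
# >>> exp.verify_data(x, y, validation_data=(vx, vy))            # case 2: explicit validation
# >>> exp.verify_data(x, y, validation_data=(vx, vy), test_data=(tx, ty))  # case 3
# Each call returns the 6-tuple (train_x, train_y, val_x, val_y, test_x, test_y).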
def _get_model_folders(self):
model_folders = [p for p in os.listdir(self.exp_path) if os.path.isdir(os.path.join(self.exp_path, p))]
# find all the model folders
m_folders = []
for m in model_folders:
if any(m in m_ for m_ in self.considered_models_):
m_folders.append(m)
return m_folders
def _build_predict_from_configs(self, x, y):
model_folders = self._get_model_folders()
# load all models from config
for model_name in model_folders:
model = self._load_model(model_name)
out = model.predict(x=x, y=y, return_true=True, process_results=False)
self._populate_results(f"model_{model_name}", train_results=None, test_results=out)
return
def _get_best_model_path(self, model_name):
m_path = os.path.join(self.exp_path, model_name)
if len(os.listdir(m_path)) == 1:
m_path = os.path.join(m_path, os.listdir(m_path)[0])
elif 'best' in os.listdir(m_path):
# within the 'best' folder there is another folder
m_path = os.path.join(m_path, 'best')
assert len(os.listdir(m_path)) == 1
m_path = os.path.join(m_path, os.listdir(m_path)[0])
else:
folders = [path for path in os.listdir(m_path) if
os.path.isdir(os.path.join(m_path, path)) and path.startswith('1_')]
if len(folders) == 1:
m_path = os.path.join(m_path, folders[0])
else:
raise ValueError(f"Cant find best model in {m_path}")
return m_path
class TransformationExperiments(Experiments):
"""Helper class to conduct experiments with different transformations
Example:
>>> from ai4water.datasets import busan_beach
>>> from ai4water.experiments import TransformationExperiments
>>> from ai4water.hyperopt import Integer, Categorical, Real
... # Define your experiment
>>> class MyTransformationExperiments(TransformationExperiments):
...
... def update_paras(self, **kwargs):
... _layers = {
... "LSTM": {"config": {"units": int(kwargs['lstm_units']}},
... "Dense": {"config": {"units": 1, "activation": kwargs['dense_actfn']}},
... "reshape": {"config": {"target_shape": (1, 1)}}
... }
... return {'model': {'layers': _layers},
... 'lookback': int(kwargs['lookback']),
... 'batch_size': int(kwargs['batch_size']),
... 'lr': float(kwargs['lr']),
... 'transformation': kwargs['transformation']}
>>> data = busan_beach()
>>> inputs = ['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm', 'pcp3_mm']
>>> outputs = ['tetx_coppml']
>>> cases = {'model_minmax': {'transformation': 'minmax'},
... 'model_zscore': {'transformation': 'zscore'}}
>>> search_space = [
... Integer(low=16, high=64, name='lstm_units', num_samples=2),
... Integer(low=3, high=15, name="lookback", num_samples=2),
... Categorical(categories=[4, 8, 12, 16, 24, 32], name='batch_size'),
... Real(low=1e-6, high=1.0e-3, name='lr', prior='log', num_samples=2),
... Categorical(categories=['relu', 'elu'], name='dense_actfn'),
... ]
>>> x0 = [20, 14, 12, 0.00029613, 'relu']
>>> experiment = MyTransformationExperiments(cases=cases, input_features=inputs,
... output_features=outputs, exp_name="testing",
... param_space=search_space, x0=x0)
"""
@property
def mode(self):
return "regression"
@property
def category(self):
return "ML"
def __init__(self,
param_space=None,
x0=None,
cases: dict = None,
exp_name: str = None,
num_samples: int = 5,
verbosity: int = 1,
**model_kws):
self.param_space = param_space
self.x0 = x0
exp_name = exp_name or 'TransformationExperiments' + f'_{dateandtime_now()}'
super().__init__(
cases=cases,
exp_name=exp_name,
num_samples=num_samples,
verbosity=verbosity,
**model_kws
)
@property
def tpot_estimator(self):
return None
def update_paras(self, **suggested_paras):
raise NotImplementedError(f"""
You must write the method `update_paras` which should build the Model with suggested parameters
and return the keyword arguments including `model`. These keyword arguments will then
be used to build ai4water's Model class.
""")
def _build(self, title=None, **suggested_paras):
"""Builds the ai4water Model class"""
suggested_paras = jsonize(suggested_paras)
verbosity = max(self.verbosity - 1, 0)
if 'verbosity' in self.model_kws:
verbosity = self.model_kws.pop('verbosity')
model = Model(
prefix=title,
verbosity=verbosity,
**self.update_paras(**suggested_paras),
**self.model_kws
)
setattr(self, 'model_', model)
return model
def process_model_before_fit(self, model):
"""So that the user can perform processing of the model by overwriting this method"""
return model
def sort_array(array):
"""
array: [4, 7, 3, 9, 4, 8, 2, 8, 7, 1]
returns: [4, 4, 3, 3, 3, 3, 2, 2, 2, 1]
"""
results = np.array(array, dtype=np.float32)
iters = range(1, len(results) + 1)
return [np.min(results[:i]) for i in iters]
def consider_exclude(exclude: Union[str, list],
models,
models_to_filter: Union[list, dict] = None
):
if isinstance(exclude, str):
exclude = [exclude]
if exclude is not None:
exclude = ['model_' + _model if not _model.startswith('model_') else _model for _model in exclude]
for elem in exclude:
assert elem in models, f"""
{elem} given in `exclude` is not available.
Available models are {models} and you wanted to exclude
{exclude}"""
if models_to_filter is not None:
# maybe the model has already been removed from models_to_filter
# when we considered include keyword argument.
if elem in models_to_filter:
if isinstance(models_to_filter, list):
models_to_filter.remove(elem)
else:
models_to_filter.pop(elem)
else:
assert elem in models, f'{elem} is not in models'
return
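# Illustrative example of the helper above (hypothetical values):
# >>> models = ['model_LGBMRegressor', 'model_XGBRegressor']
# >>> remaining = list(models)
# >>> consider_exclude('LGBMRegressor', models, remaining)
# >>> remaining
# ['model_XGBRegressor']
# The bare name is normalised to 'model_LGBMRegressor' and removed in place.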
def load_json_file(fpath):
with open(fpath, 'r') as fp:
result = json.load(fp)
return result
def save_json_file(fpath, obj):
with open(fpath, 'w') as fp:
json.dump(jsonize(obj), fp, sort_keys=True, indent=4)
def shred_model_name(model_name):
key = model_name[6:] if model_name.startswith('model_') else model_name
key = key[0:-9] if key.endswith("Regressor") else key
return key
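# Illustrative examples of the helper above:
# >>> shred_model_name('model_RandomForestRegressor')
# 'RandomForest'
# >>> shred_model_name('XGBRegressor')
# 'XGB'
# >>> shred_model_name('model_LGBMClassifier')  # the "Classifier" suffix is kept
# 'LGBMClassifier'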
def _combine_training_validation_data(
x_train,
y_train,
validation_data=None,
)->tuple:
"""
combines x,y pairs of training and validation data.
"""
if validation_data is None:
return x_train, y_train
x_val, y_val = validation_data
if isinstance(x_train, list):
x = []
for val in range(len(x_train)):
if x_val is not None:
_val = np.concatenate([x_train[val], x_val[val]])
x.append(_val)
else:
_val = x_train[val]
x.append(_val)
y = y_train
if hasattr(y_val, '__len__') and len(y_val) > 0:
y = np.concatenate([y_train, y_val])
elif isinstance(x_train, np.ndarray):
x, y = x_train, y_train
# if no validation data is available then use only training data
if x_val is not None:
if hasattr(x_val, '__len__') and len(x_val)>0:
x = np.concatenate([x_train, x_val])
y = np.concatenate([y_train, y_val])
else:
raise NotImplementedError
return x, y | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/experiments/_main.py | _main.py |
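# Minimal usage sketch of the helper above, assuming plain numpy arrays:
# >>> import numpy as np
# >>> x_tr, y_tr = np.zeros((80, 3)), np.zeros(80)
# >>> x_val, y_val = np.ones((20, 3)), np.ones(20)
# >>> x, y = _combine_training_validation_data(x_tr, y_tr, (x_val, y_val))
# >>> x.shape, y.shape
# ((100, 3), (100,))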
__all__ = ["MLClassificationExperiments"]
from ._main import Experiments
from .utils import classification_space
from ai4water.utils.utils import dateandtime_now
from ai4water.backend import catboost, xgboost, lightgbm
class MLClassificationExperiments(Experiments):
"""Runs classification models for comparison, with or without
optimization of hyperparameters. It compares around 30 classification
algorithms from sklearn, xgboost, catboost and lightgbm.
Examples
--------
>>> from ai4water.datasets import MtropicsLaos
>>> from ai4water.experiments import MLClassificationExperiments
>>> data = MtropicsLaos().make_classification(lookback_steps=2)
>>> inputs = data.columns.tolist()[0:-1]
>>> outputs = data.columns.tolist()[-1:]
>>> exp = MLClassificationExperiments(input_features=inputs,
>>> output_features=outputs)
>>> exp.fit(data=data, include=["CatBoostClassifier", "LGBMClassifier",
>>> 'RandomForestClassifier', 'XGBClassifier'])
>>> exp.compare_errors('accuracy', data=data)
"""
def __init__(
self,
param_space=None,
x0=None,
cases=None,
exp_name='MLClassificationExperiments',
num_samples=5,
monitor = None,
**model_kws
):
"""
Parameters
----------
        param_space : list, optional
            parameter space (hyperparameter dimensions) to be optimized; these
            can be overwritten in individual `models`.
        x0 : list, optional
            initial values of the hyperparameters which are to be optimized.
        cases : dict, optional
            additional user-defined model configurations, each of which is run
            as a separate experiment.
        exp_name : str, optional
            name of experiment
        num_samples : int, optional
            number of samples to draw from each parameter dimension when the
            search space is converted to a grid.
        monitor : list/str, optional
            performance metric(s) to monitor during the experiments.
**model_kws :
keyword arguments for :py:class:`ai4water.Model` class
"""
self.param_space = param_space
self.x0 = x0
self.spaces = classification_space(num_samples=num_samples,)
if exp_name == "MLClassificationExperiments":
exp_name = f"{exp_name}_{dateandtime_now()}"
super().__init__(
cases=cases,
exp_name=exp_name,
num_samples=num_samples,
monitor=monitor,
**model_kws
)
if catboost is None:
self.models.remove('model_CatBoostClassifier')
if lightgbm is None:
self.models.remove('model_LGBMClassifier')
if xgboost is None:
self.models.remove('model_XGBRFClassifier')
self.models.remove('model_XGBClassifier')
@property
def tpot_estimator(self):
try:
from tpot import TPOTClassifier
except (ModuleNotFoundError, ImportError):
TPOTClassifier = None
return TPOTClassifier
@property
def mode(self):
return "classification"
@property
def category(self):
return "ML"
def metric_kws(self, metric_name:str=None):
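        """returns default keyword arguments for the given performance metric;
        macro averaging is used for precision/recall/f1 so that multi-class
        problems are handled, e.g. metric_kws('f1_score') -> {'average': 'macro'}"""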
kws = {
'precision': {'average': 'macro'},
'recall': {'average': 'macro'},
'f1_score': {'average': 'macro'},
}
return kws.get(metric_name, {})
def model_AdaBoostClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostClassifier.html
self.path = "sklearn.ensemble.AdaBoostClassifier"
self.param_space = self.spaces["AdaBoostClassifier"]["param_space"]
self.x0 = self.spaces["AdaBoostClassifier"]["x0"]
return {'model': {'AdaBoostClassifier': kwargs}}
def model_BaggingClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.BaggingClassifier.html
self.path = "sklearn.ensemble.BaggingClassifier"
self.param_space = self.spaces["BaggingClassifier"]["param_space"]
self.x0 = self.spaces["BaggingClassifier"]["x0"]
return {'model': {'BaggingClassifier': kwargs}}
def model_BernoulliNB(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.BernoulliNB.html
self.path = "sklearn.naive_bayes.BernoulliNB"
self.param_space = self.spaces["BernoulliNB"]["param_space"]
self.x0 = self.spaces["BernoulliNB"]["x0"]
return {'model': {'BernoulliNB': kwargs}}
def model_CalibratedClassifierCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.calibration.CalibratedClassifierCV.html
self.path = "sklearn.calibration.CalibratedClassifierCV"
self.param_space = self.spaces["CalibratedClassifierCV"]["param_space"]
self.x0 = self.spaces["CalibratedClassifierCV"]["x0"]
return {'model': {'CalibratedClassifierCV': kwargs}}
# def model_CheckingClassifier(self, **kwargs):
# return
def model_DecisionTreeClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html
self.path = "sklearn.tree.DecisionTreeClassifier"
self.param_space = self.spaces["DecisionTreeClassifier"]["param_space"]
self.x0 = self.spaces["DecisionTreeClassifier"]["x0"]
return {'model': {'DecisionTreeClassifier': kwargs}}
def model_DummyClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyClassifier.html
self.path = "sklearn.dummy.DummyClassifier"
self.param_space = self.spaces["DummyClassifier"]["param_space"]
self.x0 = self.spaces["DummyClassifier"]["x0"]
return {'model': {'DummyClassifier': kwargs}}
def model_ExtraTreeClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.tree.ExtraTreeClassifier.html
self.path = "sklearn.tree.ExtraTreeClassifier"
self.param_space = self.spaces["ExtraTreeClassifier"]["param_space"]
self.x0 = self.spaces["ExtraTreeClassifier"]["x0"]
return {'model': {'ExtraTreeClassifier': kwargs}}
def model_ExtraTreesClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html
self.path = "sklearn.ensemble.ExtraTreesClassifier"
self.param_space = self.spaces["ExtraTreesClassifier"]["param_space"]
self.x0 = self.spaces["ExtraTreesClassifier"]["x0"]
return {'model': {'ExtraTreesClassifier': kwargs}}
def model_GaussianProcessClassifier(self, **kwargs):
        # https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessClassifier.html
self.path = "sklearn.gaussian_process.GaussianProcessClassifier"
self.param_space = self.spaces["GaussianProcessClassifier"]["param_space"]
self.x0 = self.spaces["GaussianProcessClassifier"]["x0"]
return {'model': {'GaussianProcessClassifier': kwargs}}
def model_GradientBoostingClassifier(self, **kwargs):
        # https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingClassifier.html
self.path = "sklearn.ensemble.GradientBoostingClassifier"
self.param_space = self.spaces["GradientBoostingClassifier"]["param_space"]
self.x0 = self.spaces["GradientBoostingClassifier"]["x0"]
return {'model': {'GradientBoostingClassifier': kwargs}}
def model_HistGradientBoostingClassifier(self, **kwargs):
        # https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingClassifier.html
self.path = "sklearn.ensemble.HistGradientBoostingClassifier"
self.param_space = self.spaces["HistGradientBoostingClassifier"]["param_space"]
self.x0 = self.spaces["HistGradientBoostingClassifier"]["x0"]
return {'model': {'HistGradientBoostingClassifier': kwargs}}
def model_KNeighborsClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
self.path = "sklearn.neighbors.KNeighborsClassifier"
self.param_space = self.spaces["KNeighborsClassifier"]["param_space"]
self.x0 = self.spaces["KNeighborsClassifier"]["x0"]
return {'model': {'KNeighborsClassifier': kwargs}}
def model_LabelPropagation(self, **kwargs):
## https://scikit-learn.org/stable/modules/generated/sklearn.semi_supervised.LabelPropagation.html
self.path = "sklearn.semi_supervised.LabelPropagation"
self.param_space = self.spaces["LabelPropagation"]["param_space"]
self.x0 = self.spaces["LabelPropagation"]["x0"]
return {'model': {'LabelPropagation': kwargs}}
def model_LabelSpreading(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.semi_supervised.LabelSpreading.html
self.path = "sklearn.semi_supervised.LabelSpreading"
self.param_space = self.spaces["LabelSpreading"]["param_space"]
self.x0 = self.spaces["LabelSpreading"]["x0"]
return {'model': {'LabelSpreading': kwargs}}
def model_LGBMClassifier(self, **kwargs):
# https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html
self.path = "lightgbm.LGBMClassifier"
self.param_space = self.spaces["LGBMClassifier"]["param_space"]
self.x0 = self.spaces["LGBMClassifier"]["x0"]
return {'model': {'LGBMClassifier': kwargs}}
def model_LinearDiscriminantAnalysis(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html
self.path = "sklearn.discriminant_analysis.LinearDiscriminantAnalysis"
self.param_space = self.spaces["LinearDiscriminantAnalysis"]["param_space"]
self.x0 = self.spaces["LinearDiscriminantAnalysis"]["x0"]
return {'model': {'LinearDiscriminantAnalysis': kwargs}}
def model_LinearSVC(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html
self.path = "sklearn.svm.LinearSVC"
self.param_space = self.spaces["LinearSVC"]["param_space"]
self.x0 = self.spaces["LinearSVC"]["x0"]
return {'model': {'LinearSVC': kwargs}}
def model_LogisticRegression(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
self.path = "sklearn.linear_model.LogisticRegression"
self.param_space = self.spaces["LogisticRegression"]["param_space"]
self.x0 = self.spaces["LogisticRegression"]["x0"]
return {'model': {'LogisticRegression': kwargs}}
def model_MLPClassifier(self, **kwargs):
        # https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPClassifier.html
self.path = "sklearn.neural_network.MLPClassifier"
self.param_space = self.spaces["MLPClassifier"]["param_space"]
self.x0 = self.spaces["MLPClassifier"]["x0"]
return {'model': {'MLPClassifier': kwargs}}
def model_NearestCentroid(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestCentroid.html
self.path = "sklearn.neighbors.NearestCentroid"
self.param_space = self.spaces["NearestCentroid"]["param_space"]
self.x0 = self.spaces["NearestCentroid"]["x0"]
return {'model': {'NearestCentroid': kwargs}}
def model_NuSVC(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.NuSVC.html
self.path = "sklearn.svm.NuSVC"
self.param_space = self.spaces["NuSVC"]["param_space"]
self.x0 = self.spaces["NuSVC"]["x0"]
return {'model': {'NuSVC': kwargs}}
def model_PassiveAggressiveClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.PassiveAggressiveClassifier.html
self.path = "sklearn.linear_model.PassiveAggressiveClassifier"
self.param_space = self.spaces["PassiveAggressiveClassifier"]["param_space"]
self.x0 = self.spaces["PassiveAggressiveClassifier"]["x0"]
return {'model': {'PassiveAggressiveClassifier': kwargs}}
def model_Perceptron(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Perceptron.html
self.path = "sklearn.linear_model.Perceptron"
self.param_space = self.spaces["Perceptron"]["param_space"]
self.x0 = self.spaces["Perceptron"]["x0"]
return {'model': {'Perceptron': kwargs}}
def model_QuadraticDiscriminantAnalysis(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.html
self.path = "sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis"
self.param_space = self.spaces["QuadraticDiscriminantAnalysis"]["param_space"]
self.x0 = self.spaces["QuadraticDiscriminantAnalysis"]["x0"]
return {'model': {'QuadraticDiscriminantAnalysis': kwargs}}
# def model_RadiusNeighborsClassifier(self, **kwargs):
# # https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.html
#
# self.path = "sklearn.neighbors.RadiusNeighborsClassifier"
# self.param_space = self.spaces["RadiusNeighborsClassifier"]["param_space"]
# self.x0 = self.spaces["RadiusNeighborsClassifier"]["x0"]
#
# return {'model': {'RadiusNeighborsClassifier': kwargs}}
def model_RandomForestClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
self.path = "sklearn.ensemble.RandomForestClassifier"
self.param_space = self.spaces["RandomForestClassifier"]["param_space"]
self.x0 = self.spaces["RandomForestClassifier"]["x0"]
return {'model': {'RandomForestClassifier': kwargs}}
def model_RidgeClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeClassifier.html
self.path = "sklearn.linear_model.RidgeClassifier"
self.param_space = self.spaces["RidgeClassifier"]["param_space"]
        self.x0 = self.spaces["RidgeClassifier"]["x0"]
return {'model': {'RidgeClassifier': kwargs}}
def model_RidgeClassifierCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeClassifierCV.html
self.path = "sklearn.linear_model.RidgeClassifierCV"
self.param_space = self.spaces["RidgeClassifierCV"]["param_space"]
self.x0 = self.spaces["RidgeClassifierCV"]["x0"]
return {'model': {'RidgeClassifierCV': kwargs}}
def model_SGDClassifier(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html
self.path = "sklearn.linear_model.SGDClassifier"
self.param_space = self.spaces["SGDClassifier"]["param_space"]
self.x0 = self.spaces["SGDClassifier"]["x0"]
return {'model': {'SGDClassifier': kwargs}}
def model_SVC(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
self.path = "sklearn.svm.SVC"
self.param_space = self.spaces["SVC"]["param_space"]
self.x0 = self.spaces["SVC"]["x0"]
return {'model': {'SVC': kwargs}}
def model_XGBClassifier(self, **kwargs):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html
self.path = "xgboost.XGBClassifier"
self.param_space = self.spaces["XGBClassifier"]["param_space"]
self.x0 = self.spaces["XGBClassifier"]["x0"]
return {'model': {'XGBClassifier': kwargs}}
def model_XGBRFClassifier(self, **kwargs):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBRFClassifier
self.path = "xgboost.XGBRFClassifier"
self.param_space = self.spaces["XGBRFClassifier"]["param_space"]
self.x0 = self.spaces["XGBRFClassifier"]["x0"]
return {'model': {'XGBRFClassifier': kwargs}}
def model_CatBoostClassifier(self, **suggestions):
# https://catboost.ai/en/docs/concepts/python-reference_catboostclassifier
self.path = "catboost.CatBoostClassifier"
self.param_space = self.spaces["CatBoostClassifier"]["param_space"]
self.x0 = self.spaces["CatBoostClassifier"]["x0"]
return {'model': {'CatBoostClassifier': suggestions}} | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/experiments/_cls.py | _cls.py |
__all__ = ["MLRegressionExperiments"]
from ai4water.utils.utils import get_version_info, dateandtime_now
from ._main import Experiments
from .utils import regression_space
from ai4water.backend import xgboost, lightgbm, catboost, sklearn
VERSION_INFO = get_version_info(sklearn=sklearn)
class MLRegressionExperiments(Experiments):
"""
    Compares performance of 40+ machine learning models for a regression problem.
    The experiment consists of `models` which are run using the `fit()` method. A `model`
    is one experiment.

    The user can define new `models` by subclassing this class. In fact, any new
    method in the sub-class which starts with `model_` will be considered
    as a new `model`. Otherwise the user has to overwrite the attribute `models` to
    redefine which methods (of the class) are to be used as models and which are not. A
    method which is a `model` must only return keyword arguments which will be
    streamed to the `Model` using the `build_and_run` method. Inside this new method
    the user must define which parameters to optimize, their param_space for optimization,
    and the initial values to use for optimization.
"""
def __init__(self,
param_space=None,
x0=None,
cases=None,
exp_name='MLRegressionExperiments',
num_samples=5,
verbosity=1,
**model_kws):
"""
Initializes the class
Arguments:
param_space: dimensions of parameters which are to be optimized. These
can be overwritten in `models`.
x0 list: initial values of the parameters which are to be optimized.
These can be overwritten in `models`
exp_name str: name of experiment, all results will be saved within this folder
model_kws dict: keyword arguments which are to be passed to `Model`
and are not optimized.
Examples:
>>> from ai4water.datasets import busan_beach
>>> from ai4water.experiments import MLRegressionExperiments
>>> # first compare the performance of all available models without optimizing their parameters
>>> data = busan_beach() # read data file, in this case load the default data
>>> inputs = list(data.columns)[0:-1] # define input and output columns in data
>>> outputs = list(data.columns)[-1]
>>> comparisons = MLRegressionExperiments(
... input_features=inputs, output_features=outputs,
... nan_filler= {'method': 'KNNImputer', 'features': inputs} )
>>> comparisons.fit(data=data,run_type="dry_run")
>>> comparisons.compare_errors('r2', data=data)
>>> # find out the models which resulted in r2> 0.5
>>> best_models = comparisons.compare_errors('r2', cutoff_type='greater',
... cutoff_val=0.3, data=data)
            >>> # now build a new experiment for the best models and optimize them
>>> comparisons = MLRegressionExperiments(
... input_features=inputs, output_features=outputs,
... nan_filler= {'method': 'KNNImputer', 'features': inputs},
... exp_name="BestMLModels")
>>> comparisons.fit(data=data, run_type="optimize", include=best_models.index)
>>> comparisons.compare_errors('r2', data=data)
>>> comparisons.taylor_plot() # see help(comparisons.taylor_plot()) to tweak the taylor plot
"""
self.param_space = param_space
self.x0 = x0
if exp_name == "MLRegressionExperiments":
exp_name = f"{exp_name}_{dateandtime_now()}"
super().__init__(
cases=cases,
exp_name=exp_name,
num_samples=num_samples,
verbosity=verbosity,
**model_kws
)
self.spaces = regression_space(num_samples=num_samples)
if catboost is None:
self.models.remove('model_CatBoostRegressor')
if lightgbm is None:
self.models.remove('model_LGBMRegressor')
if xgboost is None:
self.models.remove('model_XGBRFRegressor')
self.models.remove('model_XGBRegressor')
sk_maj_ver = int(sklearn.__version__.split('.')[0])
sk_min_ver = int(sklearn.__version__.split('.')[1])
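        # PoissonRegressor and TweedieRegressor were introduced in sklearn 0.23,
        # so they are removed for older sklearn versions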
if sk_maj_ver == 0 and sk_min_ver < 23:
for m in ['model_PoissonRegressor', 'model_TweedieRegressor']:
self.models.remove(m)
@property
def tpot_estimator(self):
try:
from tpot import TPOTRegressor
except (ModuleNotFoundError, ImportError):
TPOTRegressor = None
return TPOTRegressor
@property
def category(self)->str:
return "ML"
@property
def mode(self):
return "regression"
def model_AdaBoostRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.AdaBoostRegressor.html
self.path = "sklearn.ensemble.AdaBoostRegressor"
self.param_space = self.spaces["AdaBoostRegressor"]["param_space"]
self.x0 = self.spaces["AdaBoostRegressor"]["x0"]
return {'model': {'AdaBoostRegressor': kwargs}}
def model_ARDRegression(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ARDRegression.html
self.path = "sklearn.linear_model.ARDRegression"
self.param_space = self.spaces["ARDRegression"]["param_space"]
self.x0 = self.spaces["ARDRegression"]["x0"]
return {'model': {'ARDRegression': kwargs}}
def model_BaggingRegressor(self, **kwargs):
self.path = "sklearn.ensemble.BaggingRegressor"
self.param_space = self.spaces["BaggingRegressor"]["param_space"]
self.x0 = self.spaces["BaggingRegressor"]["x0"]
return {'model': {'BaggingRegressor': kwargs}}
def model_BayesianRidge(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.BayesianRidge.html
self.path = "sklearn.linear_model.BayesianRidge"
self.param_space = self.spaces["BayesianRidge"]["param_space"]
self.x0 = self.spaces["BayesianRidge"]["x0"]
return {'model': {'BayesianRidge': kwargs}}
def model_CatBoostRegressor(self, **kwargs):
# https://catboost.ai/docs/concepts/python-reference_parameters-list.html
self.path = "catboost.CatBoostRegressor"
self.param_space = self.spaces["CatBoostRegressor"]["param_space"]
self.x0 = self.spaces["CatBoostRegressor"]["x0"]
return {'model': {'CatBoostRegressor': kwargs}}
def model_DecisionTreeRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html
self.path = "sklearn.tree.DecisionTreeRegressor"
# TODO not converging
self.param_space = self.spaces["DecisionTreeRegressor"]["param_space"]
self.x0 = self.spaces["DecisionTreeRegressor"]["x0"]
return {'model': {'DecisionTreeRegressor': kwargs}}
def model_DummyRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.dummy.DummyRegressor.html
self.path = "sklearn.dummy.DummyRegressor"
self.param_space = self.spaces["DummyRegressor"]["param_space"]
self.x0 = self.spaces["DummyRegressor"]["x0"]
kwargs.update({'constant': 0.2,
'quantile': 0.2})
return {'model': {'DummyRegressor': kwargs}}
def model_ElasticNet(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNet.html
self.path = "sklearn.linear_model.ElasticNet"
self.param_space = self.spaces["ElasticNet"]["param_space"]
self.x0 = self.spaces["ElasticNet"]["x0"]
return {'model': {'ElasticNet': kwargs}}
def model_ElasticNetCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.ElasticNetCV.html
self.path = "sklearn.linear_model.ElasticNetCV"
self.param_space = self.spaces["ElasticNetCV"]["param_space"]
self.x0 = self.spaces["ElasticNetCV"]["x0"]
return {'model': {'ElasticNetCV': kwargs}}
def model_ExtraTreeRegressor(self, **kwargs):
        # https://scikit-learn.org/stable/modules/generated/sklearn.tree.ExtraTreeRegressor.html
self.path = "sklearn.tree.ExtraTreeRegressor"
self.param_space = self.spaces["ExtraTreeRegressor"]["param_space"]
self.x0 = self.spaces["ExtraTreeRegressor"]["x0"]
return {'model': {'ExtraTreeRegressor': kwargs}}
def model_ExtraTreesRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesRegressor.html
self.path = "sklearn.ensemble.ExtraTreesRegressor"
self.param_space = self.spaces["ExtraTreesRegressor"]["param_space"]
self.x0 = self.spaces["ExtraTreesRegressor"]["x0"]
return {'model': {'ExtraTreesRegressor': kwargs}}
# def model_GammaRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.GammaRegressor.html?highlight=gammaregressor
# self.param_space = [
# Real(low=0.0, high=1.0, name='alpha', num_samples=self.num_samples),
# Integer(low=50, high=500, name='max_iter', num_samples=self.num_samples),
# Real(low= 1e-6, high= 1e-2, name='tol', num_samples=self.num_samples),
# Categorical(categories=[True, False], name='warm_start'),
# Categorical(categories=[True, False], name='fit_intercept')
# ]
# self.x0 = [0.5, 100,1e-6, True, True]
# return {'model': {'GammaRegressor': kwargs}}
def model_GaussianProcessRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.html
self.path = "sklearn.gaussian_process.GaussianProcessRegressor"
self.param_space = self.spaces["GaussianProcessRegressor"]["param_space"]
self.x0 = self.spaces["GaussianProcessRegressor"]["x0"]
return {'model': {'GaussianProcessRegressor': kwargs}}
def model_GradientBoostingRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.GradientBoostingRegressor.html
self.path = "sklearn.ensemble.GradientBoostingRegressor"
self.param_space = self.spaces["GradientBoostingRegressor"]["param_space"]
self.x0 = self.spaces["GradientBoostingRegressor"]["x0"]
return {'model': {'GradientBoostingRegressor': kwargs}}
def model_HistGradientBoostingRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingRegressor.html
        # TODO hpo not converging
self.path = "sklearn.ensemble.HistGradientBoostingRegressor"
self.param_space = self.spaces["HistGradientBoostingRegressor"]["param_space"]
self.x0 = self.spaces["HistGradientBoostingRegressor"]["x0"]
return {'model': {'HistGradientBoostingRegressor':kwargs}}
def model_HuberRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.HuberRegressor.html
self.path = "sklearn.linear_model.HuberRegressor"
self.param_space = self.spaces["HuberRegressor"]["param_space"]
self.x0 = self.spaces["HuberRegressor"]["x0"]
return {'model': {'HuberRegressor': kwargs}}
def model_KernelRidge(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.kernel_ridge.KernelRidge.html
self.path = "sklearn.kernel_ridge.KernelRidge"
self.param_space = self.spaces["KernelRidge"]["param_space"]
self.x0 = self.spaces["KernelRidge"]["x0"]
return {'model': {'KernelRidge': kwargs}}
def model_KNeighborsRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html
self.path = "sklearn.neighbors.KNeighborsRegressor"
self.param_space = self.spaces["KNeighborsRegressor"]["param_space"]
self.x0 = self.spaces["KNeighborsRegressor"]["x0"]
return {'model': {'KNeighborsRegressor': kwargs}}
def model_LassoLars(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoLars.html
self.path = "sklearn.linear_model.LassoLars"
self.param_space = self.spaces["LassoLars"]["param_space"]
self.x0 = self.spaces["LassoLars"]["x0"]
return {'model': {'LassoLars': kwargs}}
def model_Lars(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lars.html
self.path = "sklearn.linear_model.Lars"
self.param_space = self.spaces["Lars"]["param_space"]
self.x0 = self.spaces["Lars"]["x0"]
return {'model': {'Lars': kwargs}}
def model_LarsCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LarsCV.html
self.path = "sklearn.linear_model.LarsCV"
self.param_space = self.spaces["LarsCV"]["param_space"]
self.x0 = self.spaces["LarsCV"]["x0"]
return {'model': {'LarsCV': kwargs}}
def model_LinearSVR(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVR.html
self.path = "sklearn.svm.LinearSVR"
self.param_space = self.spaces["LinearSVR"]["param_space"]
self.x0 = self.spaces["LinearSVR"]["x0"]
return {'model': {'LinearSVR': kwargs}}
def model_Lasso(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Lasso.html
self.path = "sklearn.linear_model.Lasso"
self.param_space = self.spaces["Lasso"]["param_space"]
self.x0 = self.spaces["Lasso"]["x0"]
return {'model': {'Lasso': kwargs}}
def model_LassoCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoCV.html
self.path = "sklearn.linear_model.LassoCV"
self.param_space = self.spaces["LassoCV"]["param_space"]
self.x0 = self.spaces["LassoCV"]["x0"]
return {'model': {'LassoCV': kwargs}}
def model_LassoLarsCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoLarsCV.html
self.path = "sklearn.linear_model.LassoLarsCV"
self.param_space = self.spaces["LassoLarsCV"]["param_space"]
self.x0 = self.spaces["LassoLarsCV"]["x0"]
return {'model': {'LassoLarsCV': kwargs}}
def model_LassoLarsIC(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LassoLarsIC.html
self.path = "sklearn.linear_model.LassoLarsIC"
self.param_space = self.spaces["LassoLarsIC"]["param_space"]
self.x0 = self.spaces["LassoLarsIC"]["x0"]
return {'model': {'LassoLarsIC': kwargs}}
def model_LGBMRegressor(self, **kwargs):
# https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html
self.path = "lightgbm.LGBMRegressor"
self.param_space = self.spaces["LGBMRegressor"]["param_space"]
self.x0 = self.spaces["LGBMRegressor"]["x0"]
return {'model': {'LGBMRegressor': kwargs}}
def model_LinearRegression(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html
self.path = "sklearn.linear_model.LinearRegression"
self.param_space = self.spaces["LinearRegression"]["param_space"]
self.x0 = self.spaces["LinearRegression"]["x0"]
return {'model': {'LinearRegression': kwargs}}
def model_MLPRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html
self.path = "sklearn.neural_network.MLPRegressor"
self.param_space = self.spaces["MLPRegressor"]["param_space"]
self.x0 = self.spaces["MLPRegressor"]["x0"]
return {'model': {'MLPRegressor': kwargs}}
def model_NuSVR(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.NuSVR.html
self.path = "sklearn.svm.NuSVR"
self.param_space = self.spaces["NuSVR"]["param_space"]
self.x0 = self.spaces["NuSVR"]["x0"]
return {'model': {'NuSVR': kwargs}}
def model_OrthogonalMatchingPursuit(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.OrthogonalMatchingPursuit.html
self.path = "sklearn.linear_model.OrthogonalMatchingPursuit"
self.param_space = self.spaces["OrthogonalMatchingPursuit"]["param_space"]
self.x0 = self.spaces["OrthogonalMatchingPursuit"]["x0"]
return {'model': {'OrthogonalMatchingPursuit': kwargs}}
def model_OrthogonalMatchingPursuitCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.OrthogonalMatchingPursuitCV.html
self.path = "sklearn.linear_model.OrthogonalMatchingPursuitCV"
self.param_space = self.spaces["OrthogonalMatchingPursuitCV"]["param_space"]
self.x0 = self.spaces["OrthogonalMatchingPursuitCV"]["x0"]
return {'model': {'OrthogonalMatchingPursuitCV': kwargs}}
def model_OneClassSVM(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.OneClassSVM.html
self.path = "sklearn.svm.OneClassSVM"
self.param_space = self.spaces["OneClassSVM"]["param_space"]
self.x0 = self.spaces["OneClassSVM"]["x0"]
return {'model': {'OneClassSVM': kwargs}}
def model_PoissonRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.PoissonRegressor.html
self.path = "sklearn.linear_model.PoissonRegressor"
self.param_space = self.spaces["PoissonRegressor"]["param_space"]
self.x0 = self.spaces["PoissonRegressor"]["x0"]
return {'model': {'PoissonRegressor': kwargs}}
def model_Ridge(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html
self.path = "sklearn.linear_model.Ridge"
self.param_space = self.spaces["Ridge"]["param_space"]
self.x0 = self.spaces["Ridge"]["x0"]
return {'model': {'Ridge': kwargs}}
def model_RidgeCV(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html
self.path = "sklearn.linear_model.RidgeCV"
self.param_space = self.spaces["RidgeCV"]["param_space"]
self.x0 = self.spaces["RidgeCV"]["x0"]
return {'model': {'RidgeCV': kwargs}}
def model_RadiusNeighborsRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsRegressor.html
self.path = "sklearn.neighbors.RadiusNeighborsRegressor"
self.param_space = self.spaces["RadiusNeighborsRegressor"]["param_space"]
self.x0 = self.spaces["RadiusNeighborsRegressor"]["x0"]
return {'model': {'RadiusNeighborsRegressor': kwargs}}
def model_RANSACRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RANSACRegressor.html
self.path = "sklearn.linear_model.RANSACRegressor"
self.param_space = self.spaces["RANSACRegressor"]["param_space"]
self.x0 = self.spaces["RANSACRegressor"]["x0"]
return {'model': {'RANSACRegressor': kwargs}}
def model_RandomForestRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
self.path = "sklearn.ensemble.RandomForestRegressor"
self.param_space = self.spaces["RandomForestRegressor"]["param_space"]
self.x0 = self.spaces["RandomForestRegressor"]["x0"]
return {'model': {'RandomForestRegressor': kwargs}}
def model_SVR(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html
self.path = "sklearn.svm.SVR"
self.param_space = self.spaces["SVR"]["param_space"]
self.x0 = self.spaces["SVR"]["x0"]
return {'model': {'SVR': kwargs}}
def model_SGDRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDRegressor.html
self.path = "sklearn.linear_model.SGDRegressor"
self.param_space = self.spaces["SGDRegressor"]["param_space"]
self.x0 = self.spaces["SGDRegressor"]["x0"]
return {'model': {'SGDRegressor': kwargs}}
# def model_TransformedTargetRegressor(self, **kwargs):
# ## https://scikit-learn.org/stable/modules/generated/sklearn.compose.TransformedTargetRegressor.html
# self.param_space = [
# Categorical(categories=[None], name='regressor'),
# Categorical(categories=[None], name='transformer'),
# Categorical(categories=[None], name='func')
# ]
# self.x0 = [None, None, None]
# return {'model': {'TransformedTargetRegressor': kwargs}}
def model_TweedieRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.TweedieRegressor.html
self.path = "sklearn.linear_model.TweedieRegressor"
self.param_space = self.spaces["TweedieRegressor"]["param_space"]
self.x0 = self.spaces["TweedieRegressor"]["x0"]
return {'model': {'TweedieRegressor': kwargs}}
def model_TheilSenRegressor(self, **kwargs):
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.TheilSenRegressor.html
self.path = "sklearn.linear_model.TheilSenRegressor"
self.param_space = self.spaces["TheilSenRegressor"]["param_space"]
self.x0 = self.spaces["TheilSenRegressor"]["x0"]
return {'model': {'TheilSenRegressor': kwargs}}
# TODO
# def model_GAMMAREGRESSOR(self, **kwargs):
# # ValueError: Some value(s) of y are out of the valid range for family GammaDistribution
# return {'GAMMAREGRESSOR': {}}
def model_XGBRFRegressor(self, **kwargs):
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBRFRegressor
self.path = "xgboost.XGBRFRegressor"
self.param_space = self.spaces["XGBRFRegressor"]["param_space"]
self.x0 = self.spaces["XGBRFRegressor"]["x0"]
return {'model': {'XGBRFRegressor': kwargs}}
def model_XGBRegressor(self, **kwargs):
        # https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBRegressor
self.path = "xgboost.XGBRegressor"
self.param_space = self.spaces["XGBRegressor"]["param_space"]
self.x0 = self.spaces["XGBRegressor"]["x0"]
return {'model': {'XGBRegressor': kwargs}} | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/experiments/_rgr.py | _rgr.py |
__all__ = ["DLRegressionExperiments", "DLClassificationExperiments"]
from ai4water.backend import tf
from ai4water.utils.utils import jsonize
from ai4water.hyperopt import Integer, Real, Categorical
from ai4water.utils.utils import dateandtime_now
from ai4water.models import MLP, CNN, LSTM, CNNLSTM, LSTMAutoEncoder, TFT, TCN
from ._main import Experiments
from .utils import dl_space
class DLRegressionExperiments(Experiments):
"""
    A framework for comparing several basic DL architectures on a given dataset.
    This class can also be used for hyperparameter optimization of more than
    one DL model/architecture. However, parameters which determine
    the dimensions of the input data, such as ``lookback``, are
    not allowed to be optimized when using random or grid search.
To check the available models
>>> exp = DLRegressionExperiments(...)
>>> exp.models
    If learning rate, batch size, and lookback are to be optimized,
their space can be specified in the following way:
>>> exp = DLRegressionExperiments(...)
>>> exp.lookback_space = [Integer(1, 100, name='lookback')]
Example
-------
>>> from ai4water.experiments import DLRegressionExperiments
>>> from ai4water.datasets import busan_beach
>>> data = busan_beach()
>>> exp = DLRegressionExperiments(
>>> input_features = data.columns.tolist()[0:-1],
>>> output_features = data.columns.tolist()[-1:],
>>> epochs=300,
>>> train_fraction=1.0,
>>> y_transformation="log",
>>> x_transformation="minmax",
>>> ts_args={'lookback':9}
>>> )
    ... # run the experiments
>>> exp.fit(data=data)
"""
def __init__(
self,
input_features: list,
param_space=None,
x0=None,
cases: dict = None,
exp_name: str = None,
num_samples: int = 5,
verbosity: int = 1,
**model_kws
):
"""initializes the experiment."""
self.input_features = input_features
self.param_space = param_space
# batch_size and lr will come from x0 so should not be
# in model_kws
if 'batch_size' in model_kws:
self.batch_size_x0 = model_kws.pop('batch_size')
else:
self.batch_size_x0 = 32
if 'lr' in model_kws:
self.lr_x0 = model_kws.pop('lr')
else:
self.lr_x0 = 0.001
self.x0 = x0
# during model initiation, we must provide input_features argument
model_kws['input_features'] = input_features
self.lookback_space = []
self.batch_size_space = Categorical(categories=[4, 8, 12, 16, 32],
name="batch_size")
self.lr_space = Real(1e-5, 0.005, name="lr")
exp_name = exp_name or 'DLExperiments' + f'_{dateandtime_now()}'
super().__init__(
cases=cases,
exp_name=exp_name,
num_samples=num_samples,
verbosity=verbosity,
**model_kws
)
self.spaces = dl_space(num_samples=num_samples)
@property
def category(self):
return "DL"
@property
def input_shape(self) -> tuple:
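        """shape of a single input sample: (num_features,) for purely tabular
        input, or (lookback, num_features) when ``ts_args['lookback']`` is given;
        e.g. 5 input features with a lookback of 9 gives (9, 5)."""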
features = len(self.input_features)
shape = features,
if "ts_args" in self.model_kws:
if "lookback" in self.model_kws['ts_args']:
shape = self.model_kws['ts_args']['lookback'], features
return shape
@property
def lookback_space(self):
return self._lookback_space
@lookback_space.setter
def lookback_space(self, space):
self._lookback_space = space
@property
def batch_size_space(self):
return self._batch_size_space
@batch_size_space.setter
def batch_size_space(self, bs_space):
self._batch_size_space = bs_space
@property
def lr_space(self):
return self._lr_space
@lr_space.setter
def lr_space(self, lr_space):
self._lr_space = lr_space
@property
def static_space(self):
_space = []
if self.lookback_space:
_space.append(self.lookback_space)
if self.batch_size_space:
_space.append(self.batch_size_space)
if self.lr_space:
_space.append(self.lr_space)
return _space
@property
def static_x0(self):
_x0 = []
if self.lookback_space:
_x0.append(self.model_kws.get('lookback', 5))
if self.batch_size_space:
_x0.append(self.batch_size_x0)
if self.lr_space:
_x0.append(self.lr_x0)
return _x0
@property
def mode(self):
return "regression"
@property
def tpot_estimator(self):
return None
def _pre_build_hook(self, **suggested_paras):
"""suggested_paras contain model configuration which
may contain executable tf layers which should be
serialized properly.
"""
suggested_paras = jsonize(suggested_paras, {
tf.keras.layers.Layer: tf.keras.layers.serialize})
return suggested_paras
def model_MLP(self, **kwargs):
"""multi-layer perceptron model"""
self.param_space = self.spaces["MLP"]["param_space"] + self.static_space
self.x0 = self.spaces["MLP"]["x0"] + self.static_x0
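        # `batch_size` and `lr` are top-level arguments of ai4water's Model rather
        # than of the MLP architecture helper, so they are separated from the
        # architecture kwargs below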
_kwargs = {}
for arg in ['batch_size', 'lr']:
if arg in kwargs:
_kwargs[arg] = kwargs.pop(arg)
config = {'model': MLP(input_shape=self.input_shape,
mode=self.mode,
**kwargs)}
config.update(_kwargs)
return config
def model_LSTM(self, **kwargs):
"""LSTM based model"""
self.param_space = self.spaces["LSTM"]["param_space"] + self.static_space
self.x0 = self.spaces["LSTM"]["x0"] + self.static_x0
_kwargs = {}
for arg in ['batch_size', 'lr']:
if arg in kwargs:
_kwargs[arg] = kwargs.pop(arg)
config = {'model': LSTM(input_shape=self.input_shape,
mode=self.mode,
**kwargs)}
config.update(_kwargs)
return config
def model_CNN(self, **kwargs):
"""1D CNN based model"""
self.param_space = self.spaces["CNN"]["param_space"] + self.static_space
self.x0 = self.spaces["CNN"]["x0"] + self.static_x0
_kwargs = {}
for arg in ['batch_size', 'lr']:
if arg in kwargs:
_kwargs[arg] = kwargs.pop(arg)
config = {'model': CNN(input_shape=self.input_shape,
mode=self.mode,
**kwargs)}
config.update(_kwargs)
return config
def model_CNNLSTM(self, **kwargs)->dict:
"""CNN-LSTM model"""
self.param_space = self.spaces["CNNLSTM"]["param_space"] + self.static_space
self.x0 = self.spaces["CNNLSTM"]["x0"] + self.static_x0
_kwargs = {}
for arg in ['batch_size', 'lr']:
if arg in kwargs:
_kwargs[arg] = kwargs.pop(arg)
assert len(self.input_shape) == 2
config = {'model': CNNLSTM(input_shape=self.input_shape,
mode=self.mode,
**kwargs)}
config.update(_kwargs)
return config
def model_LSTMAutoEncoder(self, **kwargs):
"""LSTM based auto-encoder model."""
self.param_space = self.spaces["LSTMAutoEncoder"]["param_space"] + self.static_space
self.x0 = self.spaces["LSTMAutoEncoder"]["x0"] + self.static_x0
_kwargs = {}
for arg in ['batch_size', 'lr']:
if arg in kwargs:
_kwargs[arg] = kwargs.pop(arg)
config = {'model': LSTMAutoEncoder(input_shape=self.input_shape,
mode=self.mode,
**kwargs)}
config.update(_kwargs)
return config
def model_TCN(self, **kwargs):
"""Temporal Convolution network based model."""
self.param_space = self.spaces["TCN"]["param_space"] + self.static_space
self.x0 = self.spaces["TCN"]["x0"] + self.static_x0
_kwargs = {}
for arg in ['batch_size', 'lr']:
if arg in kwargs:
_kwargs[arg] = kwargs.pop(arg)
config = {'model': TCN(input_shape=self.input_shape,
mode=self.mode,
**kwargs)}
config.update(_kwargs)
return config
def model_TFT(self, **kwargs):
"""temporal fusion transformer model."""
self.param_space = self.spaces["TFT"]["param_space"] + self.static_space
self.x0 = self.spaces["TFT"]["x0"] + self.static_x0
_kwargs = {}
for arg in ['batch_size', 'lr']:
if arg in kwargs:
_kwargs[arg] = kwargs.pop(arg)
config = {'model': TFT(input_shape=self.input_shape,
**kwargs)}
config.update(_kwargs)
return config
class DLClassificationExperiments(DLRegressionExperiments):
"""
Compare multiple neural network architectures for a classification problem
Examples
---------
>>> from ai4water.experiments import DLClassificationExperiments
>>> from ai4water.datasets import MtropicsLaos
>>> data = MtropicsLaos().make_classification(
... input_features=['air_temp', 'rel_hum'],
... lookback_steps=5)
... #define inputs and outputs
>>> inputs = data.columns.tolist()[0:-1]
>>> outputs = data.columns.tolist()[-1:]
... #create the experiments class
>>> exp = DLClassificationExperiments(
... input_features=inputs,
... output_features=outputs,
... epochs=5,
... ts_args={"lookback": 5}
...)
... #run the experiments
>>> exp.fit(data=data, include=["TFT", "MLP"])
"""
def __init__(
self,
exp_name=f"DLClassificationExperiments_{dateandtime_now()}",
*args,
**kwargs):
super(DLClassificationExperiments, self).__init__(
exp_name=exp_name,
*args, **kwargs
)
@property
def mode(self):
return "classification"
def metric_kws(self, metric_name:str=None):
kws = {
'precision': {'average': 'macro'},
'recall': {'average': 'macro'},
'f1_score': {'average': 'macro'},
}
return kws.get(metric_name, {}) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/experiments/_dl.py | _dl.py |
import sys
from ai4water.hyperopt import Integer, Real, Categorical
def regression_space(
num_samples:int,
num_examples:int=None,
verbosity: bool = 0
)->dict:
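    """builds the hyperparameter search space and initial values (x0) for the
    regression estimators compared by MLRegressionExperiments. ``num_examples``,
    if given, caps data-dependent dimensions such as ``n_neighbors``.

    Illustrative usage:

    >>> spaces = regression_space(num_samples=10)
    >>> spaces["Lasso"]["param_space"]  # list of Real/Integer/Categorical dimensions
    >>> spaces["Lasso"]["x0"]           # corresponding initial values
    """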
spaces = {
"AdaBoostRegressor":{
"param_space": [
Integer(low=5, high=100, name='n_estimators', num_samples=num_samples),
Real(low=0.001, high=1.0, prior='log-uniform', name='learning_rate', num_samples=num_samples)],
"x0":
[50, 1.0]},
"ARDRegression":{
"param_space":[
Real(low=1e-7, high=1e-5, name='alpha_1', num_samples=num_samples),
Real(low=1e-7, high=1e-5, name='alpha_2', num_samples=num_samples),
Real(low=1e-7, high=1e-5, name='lambda_1', num_samples=num_samples),
Real(low=1e-7, high=1e-5, name='lambda_2', num_samples=num_samples),
Real(low=1000, high=1e5, name='threshold_lambda', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept')],
"x0":
[1e-7, 1e-7, 1e-7, 1e-7, 1000, True]},
"BaggingRegressor": {
"param_space": [
Integer(low=5, high=50, name='n_estimators', num_samples=num_samples),
Real(low=0.1, high=1.0, name='max_samples', num_samples=num_samples),
Real(low=0.1, high=1.0, name='max_features', num_samples=num_samples),
Categorical(categories=[True, False], name='bootstrap'),
Categorical(categories=[True, False], name='bootstrap_features')],
#Categorical(categories=[True, False], name='oob_score')], # linked with bootstrap
"x0":
[10, 1.0, 1.0, True, False]},
"BayesianRidge": {
"param_space": [
Integer(low=40, high=1000, name='n_iter', num_samples=num_samples),
Real(low=1e-7, high=1e-5, name='alpha_1', num_samples=num_samples),
Real(low=1e-7, high=1e-5, name='alpha_2', num_samples=num_samples),
Real(low=1e-7, high=1e-5, name='lambda_1', num_samples=num_samples),
Real(low=1e-7, high=1e-5, name='lambda_2', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept')],
"x0": [40, 1e-7, 1e-7, 1e-7, 1e-7, True]},
"DummyRegressor": {
"param_space": [
Categorical(categories=['mean', 'median', 'quantile'], name='strategy')],
"x0":
['quantile']},
"KNeighborsRegressor": {
"param_space": [
Integer(low=3, high=num_examples or 50, name='n_neighbors', num_samples=num_samples),
Categorical(categories=['uniform', 'distance'], name='weights'),
Categorical(categories=['auto', 'ball_tree', 'kd_tree', 'brute'], name='algorithm'),
Integer(low=10, high=100, name='leaf_size', num_samples=num_samples),
Integer(low=1, high=5, name='p', num_samples=num_samples)],
"x0":
[5, 'uniform', 'auto', 30, 2]},
"LassoLars":{
"param_space": [
Real(low=1.0, high=5.0, name='alpha', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept')],
"x0":
[1.0, False]},
"Lars": {
"param_space": [
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=100, high=1000, name='n_nonzero_coefs', num_samples=num_samples)],
"x0":
[True, 100]},
"LarsCV": {
"param_space": [
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=100, high=1000, name='max_iter', num_samples=num_samples),
Integer(low=100, high=5000, name='max_n_alphas', num_samples=num_samples)],
"x0":
[True, 500, 1000]},
"LinearSVR": {
"param_space": [
Real(low=1.0, high=5.0, name='C', num_samples=num_samples),
Real(low=0.01, high=0.9, name='epsilon', num_samples=num_samples),
Real(low=1e-5, high=1e-1, name='tol', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept')],
"x0":
[1.0, 0.01, 1e-5, True]},
"Lasso": {
"param_space": [
Real(low=1.0, high=5.0, name='alpha', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Real(low=1e-5, high=1e-1, name='tol', num_samples=num_samples)],
"x0":
[1.0, True, 1e-5]},
"LassoCV": {
"param_space": [
Real(low=1e-5, high=1e-2, name='eps', num_samples=num_samples),
Integer(low=10, high=1000, name='n_alphas', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_iter', num_samples=num_samples)],
"x0":
[1e-3, 100, True, 1000]},
"LassoLarsCV": {
"param_space": [
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_n_alphas', num_samples=num_samples)],
"x0":
[True, 1000]},
"LassoLarsIC": {
"param_space": [
Categorical(categories=[True, False], name='fit_intercept'),
Categorical(categories=['bic', 'aic'], name='criterion')],
"x0":
[True, 'bic']},
"LinearRegression": {
"param_space": [
Categorical(categories=[True, False], name='fit_intercept')],
"x0":
[True]},
"MLPRegressor": {
"param_space": [
Integer(low=10, high=500, name='hidden_layer_sizes', num_samples=num_samples),
Categorical(categories=['identity', 'logistic', 'tanh', 'relu'], name='activation'),
Categorical(categories=['lbfgs', 'sgd', 'adam'], name='solver'),
Real(low=1e-6, high=1e-3, name='alpha', num_samples=num_samples),
# Real(low=1e-6, high=1e-3, name='learning_rate')
Categorical(categories=['constant', 'invscaling', 'adaptive'], name='learning_rate'),],
"x0":
[10, 'relu', 'adam', 1e-6, 'constant']},
"NuSVR": {
"param_space": [
Real(low=0.5,high=0.9, name='nu', num_samples=num_samples),
Real(low=1.0, high=5.0, name='C', num_samples=num_samples),
Categorical(categories=['linear', 'poly', 'rbf', 'sigmoid'], name='kernel')],
"x0":
[0.5, 1.0, 'sigmoid']},
"OrthogonalMatchingPursuit": {
"param_space": [
Categorical(categories=[True, False], name='fit_intercept'),
Real(low=0.1, high=10, name='tol', num_samples=num_samples)],
"x0":
[True, 0.1]},
"OrthogonalMatchingPursuitCV": {
"param_space": [
# Integer(low=10, high=100, name='max_iter'),
Categorical(categories=[True, False], name='fit_intercept')],
"x0":
[ # 50,
True]},
"OneClassSVM": {
"param_space": [
Categorical(categories=['linear', 'poly', 'rbf', 'sigmoid'], name='kernel'),
Real(low=0.1, high=0.9, name='nu', num_samples=num_samples),
Categorical(categories=[True, False], name='shrinking')],
"x0":
['rbf', 0.1, True]},
"PoissonRegressor": {
"param_space": [
Real(low=0.0, high=1.0, name='alpha', num_samples=num_samples),
# Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=50, high=500, name='max_iter', num_samples=num_samples)],
"x0":
[0.5, 100]},
"Ridge": {
"param_space": [
Real(low=0.0, high=3.0, name='alpha', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Categorical(categories=['auto', 'svd', 'cholesky', 'saga'], name='solver'),],
"x0":
[1.0, True, 'auto']},
"RidgeCV": {
"param_space": [
Categorical(categories=[True, False], name='fit_intercept'),
Categorical(categories=['auto', 'svd', 'eigen'], name='gcv_mode'),],
"x0":
[True, 'auto']},
"RadiusNeighborsRegressor": {
"param_space": [
Categorical(categories=['uniform', 'distance'], name='weights'),
Categorical(categories=['auto', 'ball_tree', 'kd_tree', 'brute'], name='algorithm'),
Integer(low=10, high=300, name='leaf_size', num_samples=num_samples),
Integer(low=1,high=5, name='p', num_samples=num_samples)],
"x0":
['uniform', 'auto', 10, 1]},
"RANSACRegressor": {
"param_space": [
Integer(low=10, high=1000, name='max_trials', num_samples=num_samples),
Real(low=0.01, high=0.99, name='min_samples', num_samples=num_samples)],
"x0":
[10, 0.01]},
"TweedieRegressor": {
"param_space": [
Real(low=0.0, high=5.0, name='alpha', num_samples=num_samples),
Categorical(categories=['auto', 'identity', 'log'], name='link'),
Integer(low=50, high=500, name='max_iter', num_samples=num_samples)],
"x0":
[1.0, 'auto',100]},
"TheilSenRegressor": {
"param_space": [
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=30, high=1000, name='max_iter', num_samples=num_samples),
Real(low=1e-5, high=1e-1, name='tol', num_samples=num_samples),
# Integer(low=self.data.shape[1]+1, high=len(self.data), name='n_subsamples')
],
"x0":
[True, 50, 0.001]},
"XGBRegressor": {
"param_space": [
# Number of gradient boosted trees
Integer(low=5, high=200, name='n_estimators', num_samples=num_samples),
# Maximum tree depth for base learners
#Integer(low=3, high=50, name='max_depth', num_samples=num_samples),
Real(low=0.0001, high=0.5, name='learning_rate', prior='log-uniform', num_samples=num_samples),
Categorical(categories=['gbtree', 'gblinear', 'dart'], name='booster'),
# Minimum loss reduction required to make a further partition on a leaf node of the tree.
# Real(low=0.1, high=0.9, name='gamma', num_samples=self.num_samples),
# Minimum sum of instance weight(hessian) needed in a child.
# Real(low=0.1, high=0.9, name='min_child_weight', num_samples=self.num_samples),
# Maximum delta step we allow each tree’s weight estimation to be.
# Real(low=0.1, high=0.9, name='max_delta_step', num_samples=self.num_samples),
# Subsample ratio of the training instance.
# Real(low=0.1, high=0.9, name='subsample', num_samples=self.num_samples),
# Real(low=0.1, high=0.9, name='colsample_bytree', num_samples=self.num_samples),
# Real(low=0.1, high=0.9, name='colsample_bylevel', num_samples=self.num_samples),
# Real(low=0.1, high=0.9, name='colsample_bynode', num_samples=self.num_samples),
# Real(low=0.1, high=0.9, name='reg_alpha', num_samples=self.num_samples),
# Real(low=0.1, high=0.9, name='reg_lambda', num_samples=self.num_samples)
],
"x0":
None},
"RandomForestRegressor": {
"param_space": [
Integer(low=5, high=50, name='n_estimators', num_samples=num_samples),
Integer(low=3, high=30, name='max_depth', num_samples=num_samples),
Real(low=0.1, high=0.5, name='min_samples_split', num_samples=num_samples),
# Real(low=0.1, high=1.0, name='min_samples_leaf'),
Real(low=0.0, high=0.5, name='min_weight_fraction_leaf', num_samples=num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name='max_features')],
"x0":
[10, 5, 0.4, # 0.2,
0.1, 'auto']},
"GradientBoostingRegressor": {
"param_space": [
# number of boosting stages to perform
Integer(low=5, high=500, name='n_estimators', num_samples=num_samples),
# shrinks the contribution of each tree
Real(low=0.001, high=1.0, prior='log-uniform', name='learning_rate', num_samples=num_samples),
# fraction of samples to be used for fitting the individual base learners
Real(low=0.1, high=1.0, name='subsample', num_samples=num_samples),
Real(low=0.1, high=0.9, name='min_samples_split', num_samples=num_samples),
Integer(low=2, high=30, name='max_depth', num_samples=num_samples)],
"x0":
[5, 0.001, 1, 0.1, 3]},
"LGBMRegressor": {
"param_space": [
# todo, during optimization not working with 'rf'
Categorical(categories=['gbdt', 'dart', 'goss'], name='boosting_type'),
Integer(low=10, high=200, name='num_leaves', num_samples=num_samples),
Real(low=0.0001, high=0.1, name='learning_rate', prior='log-uniform', num_samples=num_samples),
Integer(low=20, high=100, name='n_estimators', num_samples=num_samples)],
"x0":
['gbdt', 31, 0.1, 100]},
"CatBoostRegressor": {
"param_space": [
# maximum number of trees that can be built
Integer(low=20, high=100, name='iterations', num_samples=num_samples),
# Used for reducing the gradient step.
Real(low=0.0001, high=0.5, prior='log-uniform', name='learning_rate', num_samples=num_samples),
# Coefficient at the L2 regularization term of the cost function.
Real(low=0.5, high=5.0, name='l2_leaf_reg', num_samples=num_samples),
                # The larger the value, the smaller the model size.
Real(low=0.1, high=10, name='model_size_reg', num_samples=num_samples),
# percentage of features to use at each split selection, when features are selected over again at random.
Real(low=0.1, high=0.95, name='rsm', num_samples=num_samples),
# number of splits for numerical features
Integer(low=32, high=1032, name='border_count', num_samples=num_samples),
                # The quantization mode for numerical features.
Categorical(categories=['Median', 'Uniform', 'UniformAndQuantiles',
'MaxLogSum', 'MinEntropy', 'GreedyLogSum'], name='feature_border_type')],
"x0":
[50, 0.01, 3.0, 0.5, 0.5, 32, 'GreedyLogSum']},
"DecisionTreeRegressor": {
"param_space": [
Categorical(["best", "random"], name='splitter'),
Integer(low=2, high=10, name='min_samples_split', num_samples=num_samples),
# Real(low=1, high=5, name='min_samples_leaf'),
Real(low=0.0, high=0.5, name="min_weight_fraction_leaf", num_samples=num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name="max_features")],
"x0":
['best', 2, 0.0, 'auto']},
"ElasticNet": {
"param_space": [
Real(low=1.0, high=5.0, name='alpha', num_samples=num_samples),
Real(low=0.1, high=1.0, name='l1_ratio', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_iter', num_samples=num_samples),
Real(low=1e-5, high=1e-3, name='tol', num_samples=num_samples)],
"x0":
[2.0, 0.2, True, 1000, 1e-4]},
"ElasticNetCV": {
"param_space": [
Real(low=0.1, high=1.0, name='l1_ratio', num_samples=num_samples),
Real(low=1e-5, high=1e-2, name='eps', num_samples=num_samples),
Integer(low=10, high=1000, name='n_alphas', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_iter', num_samples=num_samples)],
"x0":
[0.5, 1e-3, 100, True, 1000]},
"ExtraTreeRegressor": {
"param_space": [
Integer(low=3, high=30, name='max_depth', num_samples=num_samples),
Real(low=0.1, high=0.5, name='min_samples_split', num_samples=num_samples),
Real(low=0.0, high=0.5, name='min_weight_fraction_leaf', num_samples=num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name='max_features')],
"x0":
[5, 0.2, 0.2, 'auto']},
"ExtraTreesRegressor": {
"param_space": [
Integer(low=5, high=500, name='n_estimators', num_samples=num_samples),
Integer(low=3, high=30, name='max_depth', num_samples=num_samples),
Integer(low=2, high=10, name='min_samples_split', num_samples=num_samples),
Integer(low=1, high=10, num_samples=num_samples, name='min_samples_leaf'),
Real(low=0.0, high=0.5, name='min_weight_fraction_leaf', num_samples=num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name='max_features')],
"x0":
[100, 5, 2, 1, 0.0, 'auto']},
"GaussianProcessRegressor": {
"param_space": [
Real(low=1e-10, high=1e-7, name='alpha', num_samples=num_samples),
Integer(low=0, high=5, name='n_restarts_optimizer', num_samples=num_samples)],
"x0":
[1e-10, 1]},
"HistGradientBoostingRegressor": {
"param_space": [
# Used for reducing the gradient step.
Real(low=0.0001, high=0.9, prior='log-uniform', name='learning_rate', num_samples=num_samples),
Integer(low=50, high=500, name='max_iter', num_samples=num_samples), # maximum number of trees.
Integer(low=2, high=100, name='max_depth', num_samples=num_samples), # maximum depth of each tree.
# maximum number of leaves for each tree
Integer(low=10, high=100, name='max_leaf_nodes', num_samples=num_samples),
# minimum number of samples per leaf
Integer(low=10, high=100, name='min_samples_leaf', num_samples=num_samples),
# The L2 regularization parameter.
Real(low=0.0, high=0.5, name='l2_regularization', num_samples=num_samples)],
"x0":
[0.1, 100, 10, 31, 20, 0.0]},
"HuberRegressor": {
"param_space": [
Real(low=1.0, high=5.0, name='epsilon', num_samples=num_samples),
Integer(low=50, high=500, name='max_iter', num_samples=num_samples),
Real(low=1e-5, high=1e-2, name='alpha', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept')],
"x0":
[2.0, 50, 1e-5, False]},
"KernelRidge": {
"param_space": [
Real(low=1.0, high=5.0, name='alpha', num_samples=num_samples)
# Categorical(categories=['poly', 'linear', name='kernel'])
],
"x0":
[1.0]},
"SVR": {
"param_space": [
# https://stackoverflow.com/questions/60015497/valueerror-precomputed-matrix-must-be-a-square-matrix-input-is-a-500x29243-mat
# todo, optimization not working with 'precomputed'
Categorical(categories=['linear', 'poly', 'rbf', 'sigmoid'], name='kernel'),
Real(low=1.0, high=5.0, name='C', num_samples=num_samples),
Real(low=0.01, high=0.9, name='epsilon', num_samples=num_samples)],
"x0":
['rbf',1.0, 0.01]},
"SGDRegressor": {
"param_space": [
Categorical(categories=['l1', 'l2', 'elasticnet'], name='penalty'),
Real(low=0.01, high=1.0, name='alpha', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_iter', num_samples=num_samples),
Categorical(categories=['constant', 'optimal', 'invscaling', 'adaptive'], name='learning_rate')],
"x0":
['l2', 0.1, True, 1000, 'invscaling']},
"XGBRFRegressor": {
"param_space": [
# Number of gradient boosted trees
Integer(low=5, high=100, name='n_estimators', num_samples=num_samples),
# Maximum tree depth for base learners
Integer(low=3, high=50, name='max_depth', num_samples=num_samples),
Real(low=0.0001, high=0.5, prior='log-uniform', name='learning_rate', num_samples=num_samples),
# Categorical(categories=['gbtree', 'gblinear', 'dart'], name='booster'), # todo solve error
# Minimum loss reduction required to make a further partition on a leaf node of the tree.
Real(low=0.1, high=0.9, name='gamma', num_samples=num_samples),
# Minimum sum of instance weight(hessian) needed in a child.
Real(low=0.1, high=0.9, name='min_child_weight', num_samples=num_samples),
# Maximum delta step we allow each tree’s weight estimation to be.
Real(low=0.1, high=0.9, name='max_delta_step', num_samples=num_samples),
# Subsample ratio of the training instance.
Real(low=0.1, high=0.9, name='subsample', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bytree', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bylevel', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bynode', num_samples=num_samples),
Real(low=0.1, high=0.9, name='reg_alpha', num_samples=num_samples),
Real(low=0.1, high=0.9, name='reg_lambda', num_samples=num_samples)],
"x0":
[50, 3, 0.001, 0.1, 0.1, 0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.1]}
}
# remove the estimators from those libraries which are not available/installed
libraries_to_models = {
'catboost': ['CatBoostRegressor'],
'xgboost': ['XGBRFRegressor', 'XGBRegressor'],
'lightgbm': ['LGBMRegressor']
}
_remove_estimator(spaces, libraries_to_models, verbosity)
return spaces
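# A minimal usage sketch (names taken from the dictionary above): the returned
# mapping can be queried per estimator for its search space and default starting point,
# e.g.
# >>> spaces = regression_space(num_samples=10)
# >>> elastic_net_space = spaces["ElasticNet"]["param_space"]
# >>> elastic_net_x0 = spaces["ElasticNet"]["x0"]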
def classification_space(num_samples:int,
verbosity=0,
n_features:int = None):
ridge_cls = _ridge_classifier(num_samples=num_samples)
ridge_cls_cv = _ridge_classifiercv()
spaces = {
"AdaBoostClassifier": {
"param_space": [
Integer(low=10, high=500, name='n_estimators', num_samples=num_samples),
Real(low=1.0, high=5.0, name='learning_rate', num_samples=num_samples),
Categorical(categories=['SAMME', 'SAMME.R'], name='algorithm')],
"x0":
[50, 1.0, 'SAMME']},
"BaggingClassifier": {
"param_space": [
Integer(low=5, high=50, name='n_estimators', num_samples=num_samples),
Real(low=0.1, high=1.0, name='max_samples', num_samples=num_samples),
Real(low=0.1, high=1.0, name='max_features', num_samples=num_samples),
Categorical(categories=[True, False], name='bootstrap'),
Categorical(categories=[True, False], name='bootstrap_features')
# Categorical(categories=[True, False], name='oob_score'), # linked with bootstrap
],
"x0":
[10, 1.0, 1.0, True, False]},
"BernoulliNB": {
"param_space": [
Real(low=0.1, high=1.0, name='alpha', num_samples=num_samples),
Real(low=0.0, high=1.0, name='binarize', num_samples=num_samples)],
"x0":
[0.5, 0.5]},
"CalibratedClassifierCV": {
"param_space": [
Categorical(categories=['sigmoid', 'isotonic'], name='method'),
#Integer(low=5, high=50, name='n_jobs', num_samples=num_samples)
],
"x0":
['sigmoid']},
"DecisionTreeClassifier": {
"param_space": [
Categorical(["best", "random"], name='splitter'),
Integer(low=2, high=10, name='min_samples_split', num_samples=num_samples),
# Real(low=1, high=5, name='min_samples_leaf'),
Real(low=0.0, high=0.5, name="min_weight_fraction_leaf", num_samples=num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name="max_features"),],
"x0":
['best', 2, 0.0, 'auto']},
"DummyClassifier": {
"param_space": [
Categorical(categories=['stratified', 'most_frequent', 'prior', 'uniform', 'constant'],
name='strategy')],
"x0":
['prior']},
"ExtraTreeClassifier": {
"param_space": [
Integer(low=3, high=30, name='max_depth', num_samples=num_samples),
Real(low=0.1, high=0.5, name='min_samples_split', num_samples=num_samples),
Real(low=0.0, high=0.5, name='min_weight_fraction_leaf', num_samples=num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name='max_features')],
"x0":
[5, 0.2, 0.2, 'auto']},
"ExtraTreesClassifier": {
"param_space": [
Integer(low=5, high=50, name='n_estimators', num_samples=num_samples),
Integer(low=3, high=30, name='max_depth', num_samples=num_samples),
Real(low=0.1, high=0.5, name='min_samples_split', num_samples=num_samples),
Real(low=0.0, high=0.5, name='min_weight_fraction_leaf', num_samples=num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name='max_features')],
"x0": [10, 5, 0.4, 0.1, 'auto']},
"GaussianProcessClassifier": {
"param_space": [
Integer(low=0, high=5, name='n_restarts_optimizer', num_samples=num_samples)],
"x0":
[1]},
"GradientBoostingClassifier": {
"param_space": [
# number of boosting stages to perform
Integer(low=5, high=500, name='n_estimators', num_samples=num_samples),
# shrinks the contribution of each tree
Real(low=0.001, high=1.0, prior='log-uniform', name='learning_rate', num_samples=num_samples),
# fraction of samples to be used for fitting the individual base learners
Real(low=0.1, high=1.0, name='subsample', num_samples=num_samples),
Real(low=0.1, high=0.9, name='min_samples_split', num_samples=num_samples),
Integer(low=2, high=30, name='max_depth', num_samples=num_samples)],
"x0":
[5, 0.001, 1, 0.1, 3]},
"HistGradientBoostingClassifier": {
"param_space": [
# Used for reducing the gradient step.
Real(low=0.0001, high=0.9, prior='log-uniform', name='learning_rate', num_samples=num_samples),
Integer(low=50, high=500, name='max_iter', num_samples=num_samples), # maximum number of trees.
Integer(low=2, high=100, name='max_depth', num_samples=num_samples), # maximum depth of each tree.
# maximum number of leaves for each tree
Integer(low=10, high=100, name='max_leaf_nodes', num_samples=num_samples),
# minimum number of samples per leaf
Integer(low=10, high=100, name='min_samples_leaf', num_samples=num_samples),
# The L2 regularization parameter.
Real(low=0.0, high=0.5, name='l2_regularization', num_samples=num_samples)],
"x0": [0.1, 100, 10, 31, 20, 0.0]},
"KNeighborsClassifier": {
"param_space": [
Integer(low=3, high=5, name='n_neighbors', num_samples=num_samples),
Categorical(categories=['uniform', 'distance'], name='weights'),
Categorical(categories=['auto', 'ball_tree', 'kd_tree', 'brute'], name='algorithm'),
Integer(low=10, high=100, name='leaf_size', num_samples=num_samples),
Integer(low=1, high=5, name='p', num_samples=num_samples)],
"x0":
[5, 'uniform', 'auto', 30, 2]},
"LabelPropagation": {
"param_space": [
Categorical(categories=['knn', 'rbf'], name='kernel'),
Integer(low=5, high=10, name='n_neighbors', num_samples=num_samples),
Integer(low=50, high=1000, name='max_iter', num_samples=num_samples),
Real(low=1e-6, high=1e-2, name='tol', num_samples=num_samples),
#Integer(low=2, high=10, name='n_jobs', num_samples=num_samples)
],
"x0":
['knn', 5, 50, 1e-4]},
"LabelSpreading": {
"param_space": [
Categorical(categories=['knn', 'rbf'], name='kernel'),
Integer(low=5, high=10, name='n_neighbors', num_samples=num_samples),
Integer(low=10, high=100, name='max_iter', num_samples=num_samples),
Real(low=0.1, high=1.0, name='alpha', num_samples=num_samples),
Real(low=1e-6, high=1e-2, name='tol', num_samples=num_samples),
#Integer(low=2, high=50, name='n_jobs', num_samples=num_samples)
],
"x0":
['knn', 5, 10, 0.1, 1e-4]},
"LGBMClassifier": {
"param_space": [
Categorical(categories=['gbdt', 'dart', 'goss', 'rf'], name='boosting_type'),
Integer(low=10, high=200, name='num_leaves', num_samples=num_samples),
Real(low=0.0001, high=0.1, prior='log-uniform', name='learning_rate', num_samples=num_samples),
Integer(low=10, high=100, name='min_child_samples', num_samples=num_samples),
Integer(low=20, high=500, name='n_estimators', num_samples=num_samples)],
"x0":
['rf', 10, 0.001, 10, 20]},
"LinearDiscriminantAnalysis": {
"param_space": [
Categorical(categories=[False, True], name='store_covariance'),
Integer(low=2, high=100, name='n_components', num_samples=num_samples),
Real(low=1e-6, high=1e-2, name='tol', num_samples=num_samples)],
"x0": [True, 2, 1e-4]},
"LinearSVC": {
"param_space": [
Categorical(categories=[True, False], name='dual'),
Real(low=1.0, high=5.0, name='C', num_samples=10),
Integer(low=100, high=1000, name='max_iter', num_samples=num_samples),
Real(low=1e-5, high=1e-1, name='tol', num_samples=10),
Categorical(categories=[True, False], name='fit_intercept')],
"x0":
[True, 1.0, 100, 1e-4, True]},
"LogisticRegression": {
"param_space": [
#Categorical(categories=[True, False], name='dual'),
Real(low=1e-5, high=1e-1, name='tol', num_samples=num_samples),
Real(low=0.5, high=5.0, name='C', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=100, high=1000, name='max_iter', num_samples=10)
#Categorical(categories=['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'], name='solver')
],
"x0":
[1e-6, 1.0, True, 100]},
"MLPClassifier": {
"param_space": [
Integer(low=10, high=500, name='hidden_layer_sizes', num_samples=num_samples),
Categorical(categories=['identity', 'logistic', 'tanh', 'relu'], name='activation'),
Categorical(categories=['lbfgs', 'sgd', 'adam'], name='solver'),
Real(low=1e-6, high=1e-3, name='alpha', num_samples=num_samples),
# Real(low=1e-6, high=1e-3, name='learning_rate')
Categorical(categories=['constant', 'invscaling', 'adaptive'], name='learning_rate'), ],
"x0":
[10, 'relu', 'adam', 1e-6, 'constant']},
"NearestCentroid": {
"param_space": [
Real(low=1, high=50, name='shrink_threshold', num_samples=num_samples)],
"x0":
[5]},
"NuSVC": {
"param_space": [
Real(low=0.5, high=0.9, name='nu', num_samples=num_samples),
Integer(low=100, high=1000, name='max_iter', num_samples=num_samples),
Real(low=1e-5, high=1e-1, name='tol', num_samples=num_samples),
Real(low=100, high=500, name='cache_size', num_samples=num_samples)],
"x0":
[0.5, 100, 1e-5, 100]},
"PassiveAggressiveClassifier": {
"param_space": [
Real(low=1.0, high=5.0, name='C', num_samples=num_samples),
Real(low=0.1, high=1.0, name='validation_fraction', num_samples=num_samples),
Real(low=1e-4, high=1e-1, name='tol', num_samples=num_samples),
Integer(low=100, high=1000, name='max_iter', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept')],
"x0": [1.0, 0.1, 1e-4, 200, True]},
"Perceptron": {
"param_space": [
Real(low=1e-6, high=1e-2, name='alpha', num_samples=num_samples),
Real(low=0.1, high=1.0, name='validation_fraction', num_samples=num_samples),
Real(low=1e-4, high=1e-1, name='tol', num_samples=num_samples),
Integer(low=100, high=1000, name='max_iter', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept')],
"x0":
[1e-4, 0.1, 1e-3, 200, True]},
"QuadraticDiscriminantAnalysis": {
"param_space": [
Real(low=0.0, high=1.0, name='reg_param', num_samples=num_samples),
Real(low=1e-4, high=1e-1, name='tol', num_samples=num_samples),
Categorical(categories=[True, False], name='store_covariance')],
"x0":
[0.1, 1e-3, True]},
"RadiusNeighborsClassifier": {
"param_space": [
Categorical(categories=['uniform', 'distance'], name='weights'),
Categorical(categories=['auto', 'ball_tree', 'kd_tree', 'brute'], name='algorithm'),
Integer(low=10, high=300, name='leaf_size', num_samples=num_samples),
Integer(low=1, high=5, name='p', num_samples=num_samples)],
"x0":
['uniform', 'auto', 10, 1]},
"RandomForestClassifier": {
"param_space": [
Integer(low=50, high=1000, name='n_estimators', num_samples=num_samples),
Integer(low=3, high=30, name='max_depth', num_samples=num_samples),
Integer(low=2, high=10, name='min_samples_split', num_samples=num_samples),
Real(low=0.0, high=0.5, name='min_weight_fraction_leaf', num_samples=num_samples),
Categorical(categories=['auto', 'sqrt', 'log2'], name='max_features')],
"x0":
[100, 5, 2, 0.2, 'auto']},
"RidgeClassifier": {
"param_space": ridge_cls.space,
"x0":
ridge_cls.x0},
"RidgeClassifierCV": {
"param_space": ridge_cls_cv.space,
"x0":
ridge_cls_cv.x0},
"SGDClassifier": {
"param_space": [
Categorical(categories=['l1', 'l2', 'elasticnet'], name='penalty'),
Real(low=1e-6, high=1e-2, name='alpha', num_samples=num_samples),
Real(low=0.0, high=1.0, name='eta0', num_samples=num_samples),
Categorical(categories=[True, False], name='fit_intercept'),
Integer(low=500, high=5000, name='max_iter', num_samples=num_samples),
Categorical(categories=['constant', 'optimal', 'invscaling', 'adaptive'], name='learning_rate')],
"x0":
['l2', 1e-4, 0.5,True, 1000, 'invscaling']},
"SVC": {
"param_space": [
Real(low=1.0, high=5.0, name='C', num_samples=num_samples),
Real(low=1e-5, high=1e-1, name='tol', num_samples=num_samples),
Real(low=200, high=1000, name='cache_size', num_samples=num_samples)],
"x0":
[1.0, 1e-3, 200]},
"XGBClassifier": {
"param_space": [
# Number of gradient boosted trees
Integer(low=5, high=50, name='n_estimators', num_samples=num_samples),
# Maximum tree depth for base learners
Integer(low=3, high=30, name='max_depth', num_samples=num_samples),
Real(low=0.0001, high=0.5, prior='log-uniform', name='learning_rate', num_samples=num_samples), #
Categorical(categories=['gbtree', 'gblinear', 'dart'], name='booster'),
Real(low=0.1, high=0.9, name='gamma', num_samples=num_samples),
# Minimum loss reduction required to make a further partition on a leaf node of the tree.
Real(low=0.1, high=0.9, name='min_child_weight', num_samples=num_samples),
# Minimum sum of instance weight(hessian) needed in a child.
Real(low=0.1, high=0.9, name='max_delta_step', num_samples=num_samples),
# Maximum delta step we allow each tree’s weight estimation to be.
# Subsample ratio of the training instance.
Real(low=0.1, high=0.9, name='subsample', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bytree', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bylevel', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bynode', num_samples=num_samples),
Real(low=0.1, high=0.9, name='reg_alpha', num_samples=num_samples),
Real(low=0.1, high=0.9, name='reg_lambda', num_samples=num_samples)],
"x0":
[10, 3, 0.0001, 'gbtree', 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]},
"XGBRFClassifier": {
"param_space": [
# Number of gradient boosted trees
Integer(low=5, high=50, name='n_estimators', num_samples=num_samples),
# Maximum tree depth for base learners
Integer(low=3, high=30, name='max_depth', num_samples=num_samples),
Real(low=0.0001, high=0.5, prior='log-uniform', name='learning_rate', num_samples=num_samples), #
Categorical(categories=['gbtree', 'gblinear', 'dart'], name='booster'),
Real(low=0.1, high=0.9, name='gamma', num_samples=num_samples),
# Minimum loss reduction required to make a further partition on a leaf node of the tree.
Real(low=0.1, high=0.9, name='min_child_weight', num_samples=num_samples),
# Minimum sum of instance weight(hessian) needed in a child.
Real(low=0.1, high=0.9, name='max_delta_step', num_samples=num_samples),
# Maximum delta step we allow each tree’s weight estimation to be.
# Subsample ratio of the training instance.
Real(low=0.1, high=0.9, name='subsample', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bytree', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bylevel', num_samples=num_samples),
Real(low=0.1, high=0.9, name='colsample_bynode', num_samples=num_samples),
Real(low=0.1, high=0.9, name='reg_alpha', num_samples=num_samples),
Real(low=0.1, high=0.9, name='reg_lambda', num_samples=num_samples)],
"x0":
[10, 3, 0.0001, 'gbtree', 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]},
"CatBoostClassifier": {
"param_space": [
# maximum number of trees that can be built
Integer(low=20, high=100, name='iterations', num_samples=num_samples),
# Used for reducing the gradient step.
Real(low=0.0001, high=0.5, prior='log-uniform', name='learning_rate', num_samples=num_samples),
# depth
# https://stackoverflow.com/q/67299869/5982232
Integer(1, 15, name="depth", num_samples=num_samples),
# Coefficient at the L2 regularization term of the cost function.
Real(low=0.5, high=5.0, name='l2_leaf_reg', num_samples=num_samples),
# The larger the value, the smaller the model size.
Real(low=0.1, high=10, name='model_size_reg', num_samples=num_samples),
# percentage of features to use at each split selection, when features are selected over again at random.
Real(low=0.1, high=0.95, name='rsm', num_samples=num_samples),
# number of splits for numerical features
Integer(low=32, high=1032, name='border_count', num_samples=num_samples),
# The quantization mode for numerical features.
Categorical(categories=['Median', 'Uniform', 'UniformAndQuantiles',
'MaxLogSum', 'MinEntropy', 'GreedyLogSum'], name='feature_border_type')],
"x0":
[100, 0.01, 5, 3.0, 0.5, 0.5, 32, 'GreedyLogSum']}
}
# remove the estimators from those libraries which are not available/installed
libraries_to_models = {
'catboost': ['CatBoostClassifier'],
'xgboost': ['XGBRFClassifier', 'XGBClassifier'],
'lightgbm': ['LGBMClassifier']
}
_remove_estimator(spaces, libraries_to_models, verbosity)
return spaces
def dl_space(
num_samples:int=10
)->dict:
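"""
Returns hyperparameter search spaces for the neural network based model types
(MLP, LSTM, CNN, CNNLSTM, LSTMAutoEncoder, TCN, TFT). Each entry of the
returned dictionary contains a ``param_space`` list and a default starting
point ``x0``.
"""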
spaces = {
"MLP": {
"param_space":[
Integer(8, 128, name="units", num_samples=num_samples),
Categorical([1, 2, 3], name="num_layers"),
Real(0.0, 0.4, name="dropout", num_samples=num_samples),
Categorical(["relu", "linear", "leakyrelu", "elu", "tanh", "sigmoid"],
name="activation")],
'x0':
[32, 1, 0.0, "relu"]},
"LSTM":{
"param_space": [
Integer(8, 128, name="units", num_samples=num_samples),
Categorical([1, 2, 3], name="num_layers"),
Real(0.0, 0.4, name="dropout", num_samples=num_samples),
Categorical(["relu", "leakyrelu", "elu", "tanh", "sigmoid"],
name="activation")],
'x0':
[32, 1, 0.0, "relu"]},
"CNN": {
"param_space": [
Integer(8, 128, name="filters", num_samples=num_samples),
Categorical([2,3,4,5], name="kernel_size"),
Categorical([1, 2, 3], name="num_layers"),
Real(0.0, 0.4, name="dropout"),
Categorical(["relu", "leakyrelu", "elu", "tanh", "sigmoid"],
name="activation")],
"x0":
[32, 2, 1, 0.0, "relu"]},
"CNNLSTM": {
"param_space": [
Categorical([1,2,3], name="cnn_layers"),
Categorical([1, 2, 3], name="lstm_layers"),
Integer(8, 128, name="units", num_samples=num_samples),
Integer(8, 128, name="filters", num_samples=num_samples),
Categorical([2,3,4,5], name="kernel_size")],
"x0":
[2, 1, 32, 32, 2]},
"LSTMAutoEncoder": {
"param_space": [
Integer(8, 128, name="encoder_units", num_samples=num_samples),
Integer(8, 128, name="decoder_units", num_samples=num_samples),
Categorical([1,2,3], name="encoder_layers"),
Categorical([1,2,3], name="decoder_layers")],
"x0":
[32, 32, 1, 1]},
"TCN": {
"param_space": [
Integer(16, 128, name="filters", num_samples=num_samples),
Categorical([2,3,4,5], name="kernel_size")],
"x0":
[64, 2]},
"TFT": {
"param_space":[
Integer(16, 128, name="hidden_units", num_samples=num_samples),
Categorical([1, 2, 3, 4, 5], name="num_heads")],
"x0":
[64, 2]}
}
return spaces
def _remove_estimator(spaces, libraries_to_models, verbosity=0):
for lib, estimators in libraries_to_models.items():
if lib not in sys.modules:
for estimator in estimators:
if verbosity>0:
print(f"excluding {estimator} because library {lib} is not found")
spaces.pop(estimator)
return spaces
def regression_models()->list:
"""returns availabel regression models as list"""
return list(regression_space(5,5).keys())
def classification_models()->list:
"""returns availabel classification models as list"""
return list(classification_space(5,0).keys())
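# A minimal sketch: both helpers simply expose the keys of the corresponding
# space dictionaries, e.g. (assuming xgboost is installed so that its
# estimators are not excluded)
# >>> "XGBRegressor" in regression_models()
# True
# >>> "XGBClassifier" in classification_models()
# True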
class _ridge_classifier(object):
def __init__(self, num_samples=10):
import sklearn
self.space = [
Real(low=1.0, high=5.0, name='alpha', num_samples=num_samples),
Real(low=1e-4, high=1e-1, name='tol', num_samples=num_samples),
Categorical(categories=[True, False], name='normalize'),
Categorical(categories=[True, False], name='fit_intercept')]
self.x0 = [1.0, 1e-3, True, True]
if sklearn.__version__ > "1.0.0":
self.space.pop(2)
self.x0.pop(2)
class _ridge_classifiercv(object):
def __init__(self):
import sklearn
self.space = [
Categorical(categories=[1e-3, 1e-2, 1e-1, 1], name='alphas'),
Categorical(categories=[True, False], name='normalize'),
Categorical(categories=[True, False], name='fit_intercept')
]
self.x0 = [1.0, True, True]
if sklearn.__version__ > "1.0.0":
self.space.pop(1)
self.x0.pop(1) | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/experiments/utils.py | utils.py |
import warnings
from collections import OrderedDict
from typing import Callable
from typing import List
from typing import Optional
from ai4water.backend import np
from optuna.logging import get_logger
from optuna._transform import _SearchSpaceTransform
from optuna.study import Study
from optuna.trial import FrozenTrial
from optuna.trial import TrialState
from optuna.importance._fanova import FanovaImportanceEvaluator
from optuna.visualization._utils import _check_plot_args
from optuna.importance import get_param_importances
#from optuna.importance._fanova import FanovaImportanceEvaluator
try:
from optuna.visualization._plotly_imports import go
from optuna.visualization._plotly_imports import _imports
except (ImportError, ModuleNotFoundError):
go, _imports = None, None
from easy_mpl import bar_chart
logger = get_logger(__name__)
def _get_distributions(study, params):
# based on the supposition that get_distributions only returns an ordered dictionary and that
# requiring the `storage` attribute of study is redundant
assert params is None
trial = study.trials[0]
return OrderedDict(trial.distributions)
class ImportanceEvaluator1(FanovaImportanceEvaluator):
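"""
A thin wrapper around optuna's FanovaImportanceEvaluator which additionally
tolerates NaN/inf objective values (NaNs are replaced by the mean and infs by
the maximum of the remaining values) and stores the mean and standard
deviation of each parameter's importance in the ``importance_paras`` attribute.
"""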
def evaluate(
self,
study: Study,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
):
if target is None and study._is_multi_objective():
raise ValueError(
"If the `study` is being used for multi-objective optimization, "
"please specify the `target`."
)
distributions = _get_distributions(study, params)
if len(distributions) == 0:
return OrderedDict()
trials = []
for trial in study.trials:
if trial.state != TrialState.COMPLETE:
continue
if any(name not in trial.params for name in distributions.keys()):
continue
trials.append(trial)
trans = _SearchSpaceTransform(distributions, transform_log=False, transform_step=False)
n_trials = len(trials)
trans_params = np.empty((n_trials, trans.bounds.shape[0]), dtype=np.float64)
trans_values = np.empty(n_trials, dtype=np.float64)
for trial_idx, trial in enumerate(trials):
trans_params[trial_idx] = trans.transform(trial.params)
trans_values[trial_idx] = trial.value if target is None else target(trial)
# if nan values are present in target, use mean to fill them
nan_idx = np.isnan(trans_values)
if nan_idx.any():
warnings.warn("Nan value encountered in target values",
UserWarning)
# fill nan values with mean
trans_values[nan_idx] = np.nanmean(trans_values)
# if inf values are present in target, fill them with max
inf_idx = np.isinf(trans_values)
if inf_idx.any():
warnings.warn("Infinity value encountered in target values",
UserWarning)
# first convert infs to NaNs with np.isinf masking and then NaNs to max values
trans_values[inf_idx] = np.nan
trans_values[np.isnan(trans_values)] = np.nanmax(trans_values)
trans_bounds = trans.bounds
column_to_encoded_columns = trans.column_to_encoded_columns
if trans_params.size == 0: # `params` were given but as an empty list.
return OrderedDict()
# Many (deep) copies of the search spaces are required during the tree traversal and using
# Optuna distributions will create a bottleneck.
# Therefore, search spaces (parameter distributions) are represented by a single
# `numpy.ndarray`, coupled with a list of flags that indicate whether they are categorical
# or not.
evaluator = self._evaluator
evaluator.fit(
X=trans_params,
y=trans_values,
search_spaces=trans_bounds,
column_to_encoded_columns=column_to_encoded_columns,
)
importances = {}
variance = {}
for i, name in enumerate(distributions.keys()):
try:
_mean, _std = evaluator.get_importance((i,))
except TypeError:
# in newer optuna versions, it requires integer not list
_mean, _std = evaluator.get_importance(i)
importances[name] = _mean
variance[name] = {'mean': _mean, 'std': _std}
total_importance = sum(importances.values())
for name in importances:
importances[name] /= total_importance
sorted_importances = OrderedDict(
reversed(
sorted(importances.items(), key=lambda name_and_importance: name_and_importance[1])
)
)
self.importance_paras = variance
return sorted_importances
def plot_param_importances(
study: Study,
evaluator=None,
params: Optional[List[str]] = None,
*,
target: Optional[Callable[[FrozenTrial], float]] = None,
target_name: str = "Objective Value",
):
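"""
Calculates fANOVA based hyperparameter importance for an optuna ``study`` and
draws it as a horizontal bar chart. Returns a tuple of
(importances, per-parameter mean/std of importance, matplotlib axes), or
(None, None, None) if the importance calculation fails, e.g. when the study
contains too few completed trials.
"""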
_check_plot_args(study, target, target_name)
if go is not None:
_imports.check()
layout = go.Layout(
title="Hyperparameter Importances",
xaxis={"title": f"Importance for {target_name}"},
yaxis={"title": "Hyperparameter"},
showlegend=False,
)
# Importances cannot be evaluated without completed trials.
# Return an empty figure for consistency with other visualization functions.
trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]
if go and len(trials) == 0:
logger.warning("Study instance does not contain completed trials.")
return go.Figure(data=[], layout=layout)
if evaluator is None:
evaluator = ImportanceEvaluator1()
try:
importances = get_param_importances(
study, evaluator=evaluator, params=params, target=target
)
except RuntimeError: # sometimes it is returning error e.g. when number of trials are < 4
return None, None, None
importances = OrderedDict(reversed(list(importances.items())))
importance_values = list(importances.values())
param_names = list(importances.keys())
ax = bar_chart(importance_values, param_names, orient='h', show=False,
ax_kws={'title':"fANOVA hyperparameter importance",
'xlabel':"Relative Importance"})
return importances, evaluator.importance_paras, ax
def _get_distribution(param_name: str, study: Study):
for trial in study.trials:
if param_name in trial.distributions:
return trial.distributions[param_name]
assert False | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/hyperopt/_optuna_fanova.py | _optuna_fanova.py |
import json
import copy
import inspect
import warnings
from typing import Union, Dict
from collections import OrderedDict
from .utils import plot_convergences
from .utils import get_one_tpe_x_iter
from .utils import to_skopt_as_dict
from .utils import post_process_skopt_results
from .utils import to_skopt_space
from .utils import save_skopt_results
from .utils import Dimension
from .utils import plot_convergence
from ._space import Categorical, Real, Integer
from .utils import sort_x_iters, x_iter_for_tpe
from .utils import loss_histogram, plot_hyperparameters
from ai4water.utils.utils import JsonEncoder
from ai4water.utils.utils import clear_weights
from ai4water.utils.utils import jsonize, dateandtime_now
from ai4water.utils.visualizations import edf_plot
from ai4water.backend import hyperopt as _hyperopt
from ai4water.utils.utils import create_subplots
from ai4water.backend import np, pd, plt, os, sklearn, optuna, plotly, skopt, easy_mpl
GridSearchCV = sklearn.model_selection.GridSearchCV
RandomizedSearchCV = sklearn.model_selection.RandomizedSearchCV
ParameterGrid = sklearn.model_selection.ParameterGrid
ParameterSampler = sklearn.model_selection.ParameterSampler
bar_chart = easy_mpl.bar_chart
parallel_coordinates = easy_mpl.parallel_coordinates
if skopt is None:
pass
else:
Space = skopt.space.space.Space
#Dimension = skopt.space.space.Dimension
forest_minimize = skopt.forest_minimize
gp_minimize = skopt.gp_minimize
BayesSearchCV = skopt.BayesSearchCV
use_named_args = skopt.utils.use_named_args
from skopt.plots import plot_evaluations
if _hyperopt is not None:
space_eval = _hyperopt.space_eval
hp = _hyperopt.hp
miscs_to_idxs_vals = _hyperopt.base.miscs_to_idxs_vals
Apply = _hyperopt.pyll.base.Apply
fmin_hyperopt = _hyperopt.fmin
tpe = _hyperopt.tpe
STATUS_OK = _hyperopt.STATUS_OK
Trials = _hyperopt.Trials
rand = _hyperopt.rand
else:
space_eval, hp = None, None
miscs_to_idxs_vals = None
Apply = None
fmin_hyperopt = None
tpe = None
STATUS_OK = None
Trials = None
rand = None
if _hyperopt is not None:
try: # atpe is only available in later versions of hyperopt
atpe = _hyperopt.atpe
except AttributeError:
atpe = None
else:
atpe = None
if optuna is None:
plot_contour = None
else:
plot_contour = optuna.visualization.plot_contour
from ._fanova import fANOVA
# TODO RayTune libraries under the hood https://docs.ray.io/en/master/tune/api_docs/suggestion.html#summary
# TODO add generic algorithm, deap/pygad
# TODO skopt provides functions other than gp_minimize, see if they are useful and can be used.
# todo loading gpmin_results is not consistent.
SEP = os.sep
COUNTER = 0
ALGORITHMS = {
'bayes': {},
'bayes_rf': {'name': 'decision_tree', 'backend': ['skopt']},
'gbrt': {'name': 'gradient-boosted-tree regression', 'backend': ['skopt']},
'tpe': {'name': 'Tree of Parzen Estimators', 'backend': ['hyperopt', 'optuna']},
'atpe': {'name': 'Adaptive Tree of Parzen Estimators', 'backend': ['hyperopt']},
'random': {'name': 'random search', 'backend': ['sklearn', 'optuna', 'hyperopt']},
'grid': {'name': 'grid search', 'backend': ['sklearn', 'optuna']},
'cmaes': {'name': 'Covariance Matrix Adaptation Evolution Strategy', 'backend': ['optuna']}
}
class HyperOpt(object):
"""
The purpose of this class is to provide a uniform and simplified interface to
use `hyperopt`, `optuna`, `scikit-optimize` and `scikit-learn` based hyperparameter
optimization methods. Ideally this class should provide all the functionalities of
the aforementioned libraries with a uniform interface. It however also complements
these libraries by combining their functionalities and adding some additional
functionalities to them. On the other hand this class should not limit or
complicate the use of its underlying libraries. This means all the functionalities
of underlying libraries are available in this class as well. Moreover, you can
use this class just as you use one of its underlying library.
The purpose here is to make a class which allows application of any of the
available optimization methods on any type of model/classifier/regressor. If the
classifier/regressor is sklearn-based, then for random search, we use
RandomSearchCV_, for grid search, we use GridSearchCV_ and for Bayesian, we
use BayesSearchCV_ . On the other hand, if the model is not sklearn-based, you
will still be able to implement any of the three methods. In such case, the
bayesian_ will be implemented using `gp_minimize`. Random search and grid search
will be done by simply iterating over the sample space generated as in sklearn
based samplers. However, the post-processing of the results is (supposed to be)
done the same as is done in RandomSearchCV and GridSearchCV.
The class is expected to pass all the tests written in sklearn or skopt for
corresponding classes.
For detailed use of this class see this `hpo_tutorial`_
Attributes
--------------
- results dict:
- gpmin_results dict:
- skopt_results :
- hp_space :
- space
- skopt_space :
- space dict:
- title str: name of the folder in which all results will be saved. By
default this is same as name of `algorithm`. For `AI4Water` based
models, this is more detailed, containing problem type etc.
Methods
-------
- eval_with_best: evaluates the objective_fn on best parameters
- best_paras(): returns the best parameters from optimization.
The following examples illustrate how we can uniformly apply different optimization algorithms.
Examples
--------
>>> from ai4water import Model
>>> from ai4water.hyperopt import HyperOpt, Categorical, Integer, Real
>>> from ai4water.datasets import busan_beach
>>> from SeqMetrics import RegressionMetrics
>>> data = busan_beach()
>>> input_features = ['tide_cm', 'wat_temp_c', 'sal_psu', 'air_temp_c', 'pcp_mm', 'pcp3_mm']
>>> output_features = ['tetx_coppml']
We have to define an objective function which will take keyword arguments
and return a scalar value as output. This scalar value will be minimized during optimization.
>>> def objective_fn(**suggestion)->float:
... # the objective function must receive new parameters as keyword arguments
... model = Model(
... input_features=input_features,
... output_features=output_features,
... model={"XGBRegressor": suggestion},
... verbosity=0)
...
... model.fit(data=data)
...
... t, p = model.predict(return_true=True)
... mse = RegressionMetrics(t, p).mse()
...     # the objective function must return a scalar value which needs to be minimized
... return mse
Define search space
The search space determines the pool from which parameters are chosen during optimization.
>>> num_samples=5 # only relevant for random and grid search
>>> search_space = [
... Categorical(['gbtree', 'dart'], name='booster'),
... Integer(low=1000, high=2000, name='n_estimators', num_samples=num_samples),
... Real(low=1.0e-5, high=0.1, name='learning_rate', num_samples=num_samples)
... ]
Using Bayesian optimization with Gaussian processes
>>> num_iterations = 10
>>> optimizer = HyperOpt('bayes', objective_fn=objective_fn, param_space=search_space,
... num_iterations=num_iterations )
>>> optimizer.fit()
Using TPE with optuna
>>> num_iterations = 10
>>> optimizer = HyperOpt('tpe', objective_fn=objective_fn, param_space=search_space,
... backend='optuna',
... num_iterations=num_iterations )
>>> optimizer.fit()
Using cmaes with optuna
>>> optimizer = HyperOpt('cmaes', objective_fn=objective_fn, param_space=search_space,
... backend='optuna',
... num_iterations=num_iterations )
>>> optimizer.fit()
Using random with optuna, we can also try hyperopt and sklearn as backend for random algorithm
>>> optimizer = HyperOpt('random', objective_fn=objective_fn, param_space=search_space,
... backend='optuna',
... num_iterations=num_iterations )
>>> optimizer.fit()
Using TPE of hyperopt
>>> optimizer = HyperOpt('tpe', objective_fn=objective_fn, param_space=search_space,
... backend='hyperopt',
... num_iterations=num_iterations )
>>> optimizer.fit()
Using grid with sklearn
>>> optimizer = HyperOpt('grid', objective_fn=objective_fn, param_space=search_space,
... backend='sklearn',
... num_iterations=num_iterations )
>>> optimizer.fit()
.. _hpo_tutorial:
https://ai4water-examples.readthedocs.io/en/latest/auto_examples/index.html#hyperparameter-optimization
.. _GridSearchCV:
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
.. _RandomSearchCV:
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.RandomizedSearchCV.html
.. _BayesSearchCV:
https://scikit-optimize.github.io/stable/modules/generated/skopt.BayesSearchCV.html
.. _bayesian:
https://github.com/scikit-optimize/scikit-optimize/blob/9334d50a1ad5c9f7c013a1c1cb95313a54b83168/examples/bayesian-optimization.py#L109
"""
def __init__(
self,
algorithm: str, *,
param_space,
objective_fn,
eval_on_best: bool = False,
backend: str = None,
opt_path: str = None,
process_results: bool = True,
verbosity: int = 1,
**kwargs
):
"""
Initializes the class
Parameters
----------
algorithm : str
must be one of ``random``, ``grid``, ``bayes``, ``bayes_rf``, ``gbrt``, ``tpe``, ``atpe`` or ``cmaes``, defining which
optimization algorithm to use.
objective_fn : callable
Any callable function whose returned value is to be minimized.
It can also be either sklearn/xgboost based regressor/classifier.
param_space : list, dict
the search space of parameters to be optimized. We recommend the use
of Real, Integer and Categorical classes from [ai4water.hyperopt][ai4water.hyperopt.Integer]
(not from skopt.space). These classes allow a uniform way of defining
the parameter space for all the underlying libraries. However, to
make this class work exactly similar to its underlying libraries,
the user can also define parameter space as is defined in its
underlying libraries. For example, for hyperopt based method like
'tpe' the parameter space can be specified as in the examples of
hyperopt library. In case the code breaks, please report.
eval_on_best : bool, optional
if True, then after optimization, the objective_fn will
be evaluated on best parameters and the results will be stored in the
folder named "best" inside `title` folder.
opt_path :
path to save the results
backend : str, optional
Defines which backend library to use for the `algorithm`. For
example the user can specify whether to use `optuna` or `hyperopt`
or `sklearn` for `grid` algorithm.
verbosity : bool, optional
determines amount of information being printed
**kwargs :
Any additional keyword arguments for the underlying optimization
algorithm. In case of using AI4Water model, these must be arguments
which are passed to AI4Water's Model class.
"""
if algorithm not in ALGORITHMS:
raise ValueError(f"""Invalid value of algorithm provided. Allowd values for algorithm"
are {list(ALGORITHMS.keys())}.
You provided {algorithm}""")
self.objective_fn = objective_fn
self.algorithm = algorithm
self.backend = backend
self.param_space = param_space
self.original_space = param_space # todo self.space and self.param_space should be combined.
self.title = self.algorithm
self.results = OrderedDict() # internally stored results
self.gpmin_results = None #
self.data = None
self.eval_on_best = eval_on_best
self.opt_path = opt_path
self._process_results = process_results
self.objective_fn_is_dl = False
self.verbosity = verbosity
self.gpmin_args = self.check_args(**kwargs)
if self.use_sklearn:
if self.algorithm == "random":
self.optfn = RandomizedSearchCV(estimator=objective_fn, param_distributions=param_space, **kwargs)
else:
self.optfn = GridSearchCV(estimator=objective_fn, param_grid=param_space, **kwargs)
elif self.use_skopt_bayes:
self.optfn = BayesSearchCV(estimator=objective_fn, search_spaces=param_space, **kwargs)
@property
def backend(self):
return self._backend
@backend.setter
def backend(self, x):
if x is not None:
assert x in ['optuna', 'hyperopt', 'sklearn', 'skopt'], f"""
Backend must be one of hyperopt, optuna, sklearn or skopt but it is {x}"""
if self.algorithm == 'tpe':
if x is None:
x = 'optuna'
assert x in ['optuna', 'hyperopt']
elif self.algorithm == 'cmaes':
if x is None:
x = 'optuna'
assert x == 'optuna'
elif self.algorithm == 'atpe':
if x is None:
x = 'hyperopt'
assert x == 'hyperopt'
elif self.algorithm == 'random':
if x is None:
x = 'sklearn'
assert x in ['optuna', 'hyperopt', 'sklearn']
elif self.algorithm == 'grid':
if x is None:
x = 'sklearn'
assert x in ['sklearn', 'optuna']
elif self.algorithm in ['bayes', "bayes_rf"]:
if x is None:
x = 'skopt'
else:
raise ValueError
if x == 'hyperopt' and _hyperopt is None:
raise ValueError(f"You must install `hyperopt` to use it as backend for {self.algorithm} algorithm.")
if x == 'optuna' and optuna is None:
raise ValueError(f"You must install optuna to use `optuna` as backend for {self.algorithm} algorithm")
self._backend = x
@property
def title(self):
return self._title
@title.setter
def title(self, x):
self._title = x + '_' + str(dateandtime_now())
@property
def objective_fn_is_dl(self):
return self._objective_fn_is_dl
@objective_fn_is_dl.setter
def objective_fn_is_dl(self, x):
self._objective_fn_is_dl = x
def check_args(self, **kwargs):
kwargs = copy.deepcopy(kwargs)
if 'n_initial_points' in kwargs:
if int(''.join(skopt.__version__.split('.')[1])) < 8:
raise ValueError(f"""
'n_initial_points' argument is not available in skopt version < 0.8.
However you are using skopt version {skopt.__version__} .
See https://scikit-optimize.github.io/stable/modules/generated/skopt.gp_minimize.html#skopt.gp_minimize
for more details.
""""")
if 'x0' in kwargs and self.algorithm in ['tpe', 'atpe', 'random', 'grid', 'cmaes']:
kwargs.pop('x0')
return kwargs
def __getattr__(self, item):
# TODO, not sure if this is the best way but venturing since it is done by the legend
# here https://github.com/philipperemy/n-beats/blob/master/nbeats_keras/model.py#L166
# Since it was not possible to inherit this class from BaseSearchCV and BayesSearchCV at the same time, this
# hack makes sure that all the functionalities of GridSearchCV, RandomizeSearchCV and BayesSearchCV are also
# available with class.
if self.use_sklearn or self.use_skopt_bayes:
return getattr(self.optfn, item)
else:
raise AttributeError(f"HyperOpt does not have attribute {item}")
@property
def param_space(self):
return self._param_space
@param_space.setter
def param_space(self, x):
if self.algorithm in ["bayes", "bayes_rf"]:
assert Dimension is not None, f"you must have scikit-optimize installed to use {self.algorithm}."
if isinstance(x, dict):
_param_space = []
for k, v in x.items():
assert isinstance(v, Dimension), f"""
space for parameter {k} is of invalid type {v.__class__.__name__}.
For {self.algorithm}, it must be of type {Dimension.__name__}
"""
_param_space.append(v)
else:
assert isinstance(x, list), f"""
param space must be list of parameters but it is of type
{x.__class__.__name__}"""
for space in x:
# each element in the list can be a tuple of lower and and upper bounds
if not isinstance(space, tuple):
assert isinstance(space, Dimension), f"""
param space must be one of Integer, Real or Categorical
but it is of type {space.__class__.__name__}"""
_param_space = x
elif self.algorithm in ["random", "grid"] and self.backend != 'optuna':
# todo, do we also need to provide grid of sample space for random??
if isinstance(x, dict):
_param_space = x
elif isinstance(x, list):
_param_space = {}
for _space in x:
assert isinstance(_space, Dimension)
_param_space[_space.name] = _space.grid
else:
raise ValueError
elif self.algorithm in ['tpe', 'atpe', 'random'] and self.backend == 'hyperopt':
if isinstance(x, list):
# space is provided as list. Either all of them must be hp.space or Dimension.
if isinstance(x[0], Dimension):
_param_space = {}
for space in x:
assert isinstance(space, Dimension)
_param_space[space.name] = space.as_hp()
elif isinstance(x[0], Apply):
_param_space = []
for space in x:
assert isinstance(space, Apply), f"""invalid space type {space.__class__.__name__}"""
_param_space.append(space)
else:
raise NotImplementedError
elif isinstance(x, Dimension): # for single hyper-parameter optimization ?
_param_space = x.as_hp()
else:
_param_space = x
elif self.backend == 'optuna':
if isinstance(x, list):
_param_space = {}
for s in x:
assert isinstance(s, Dimension)
_param_space[s.name] = s
elif isinstance(x, dict):
assert all([isinstance(s, Dimension) for s in x.values()])
_param_space = x
else:
raise NotImplementedError(f"unknown type of space {x.__class__.__name__}")
else:
raise ValueError
self._param_space = _param_space
def skopt_space(self):
"""Tries to make skopt compatible Space object. If unsuccessful, return None"""
return to_skopt_space(self.original_space)
def space(self) -> dict:
"""Returns a skopt compatible space but as dictionary"""
return to_skopt_as_dict(self.algorithm, self.backend, self.original_space)
@property
def use_sklearn(self):
# will return True if we are to use sklearn's GridSearchCV or RandomSearchCV
if self.algorithm in ["random", "grid"] and "sklearn" in str(type(self.objective_fn)):
return True
return False
@property
def use_skopt_bayes(self):
# will return true if we have to use skopt based BayesSearchCV
if self.algorithm in ["bayes", "bayes_rf"] and "sklearn" in str(type(self.objective_fn)):
assert not self.use_sklearn
return True
return False
@property
def use_skopt_gpmin(self):
# will return True if we have to use skopt based gp_minimize function. This is to implement Bayesian on
# non-sklearn based models
if self.algorithm in ["bayes", "bayes_rf"] and "sklearn" not in str(type(self.objective_fn)):
assert not self.use_sklearn
assert not self.use_skopt_bayes
return True
return False
@property
def use_tpe(self):
if self.algorithm in ['tpe', 'atpe', 'random'] and self.backend == 'hyperopt':
return True
else:
return False
@property
def use_own(self):
# return True, we have to build our own optimization method.
if not self.use_sklearn and not self.use_skopt_bayes and not self.use_skopt_gpmin:
return True
return False
@property
def random_state(self):
if "random_state" not in self.gpmin_args:
return np.random.RandomState(313)
else:
return np.random.RandomState(self.gpmin_args['random_state'])
@property
def num_iterations(self):
if self.backend == 'sklearn' and self.algorithm == 'grid':
self.gpmin_args['num_iterations'] = len(ParameterGrid(self.param_space))
if 'num_iterations' in self.gpmin_args:
return self.gpmin_args['num_iterations']
if self.algorithm in ['tpe', 'atpe', 'random'] and self.backend == 'hyperopt':
return self.gpmin_args.get('max_evals', 9223372036854775807)
if self.backend == 'optuna':
return self.gpmin_args.get('n_trials', None) # default value of n_trials is None in study.optimize()
if 'n_calls' in self.gpmin_args:
return self.gpmin_args['n_calls']
return self.gpmin_args['n_iter']
@property
def use_named_args(self):
argspec = inspect.getfullargspec(self.objective_fn)
if argspec.varkw is None:
return False
elif isinstance(argspec.varkw, str):
return True
else:
raise NotImplementedError
@property
def opt_path(self):
return self._opt_path
@opt_path.setter
def opt_path(self, path):
if path is None:
path = os.path.join(os.getcwd(), f"results{SEP}" + self.title)
if not os.path.exists(path):
os.makedirs(path)
elif not os.path.exists(path):
os.makedirs(path)
self._opt_path = path
def best_paras(self, as_list=False) -> Union[list, dict]:
# returns best parameters either as dictionary or as list
if self.use_skopt_gpmin:
xys = self.xy_of_iterations()
paras = xys[self.best_iter()]['x']
elif self.backend == 'hyperopt':
d = get_one_tpe_x_iter(self.trials.best_trial['misc']['vals'], self.hp_space())
if as_list:
return list(d.values())
else:
return d
elif self.backend == 'optuna':
if as_list:
return list(self.study.best_trial.params.values())
return self.study.best_trial.params
elif self.use_skopt_bayes or self.use_sklearn:
# using BayesSerchCV
paras = self.optfn.best_params_
else:
paras = sort_x_iters(self.results[self.best_iter()]['x'], list(self.param_space.keys()))
if as_list:
return list(paras.values())
return paras
def fit(self, *args, **kwargs):
"""Makes and calls the underlying fit method
parameters
----------
**kwargs :
any keyword arguments for the user-defined objective function
Example
-------
>>> def objective_fn(a=2, b=5, **suggestions)->float:
...     # do something e.g. calculate validation score
...     val_score = 2.0
...     return val_score
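assuming ``optimizer`` is a HyperOpt instance created with the above
objective function, any keyword arguments passed to ``fit`` are forwarded
to the objective function during optimization
>>> optimizer.fit(a=3, b=6)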
"""
if self.use_sklearn or self.use_skopt_bayes:
fit_fn = self.optfn.fit
elif self.use_skopt_gpmin:
fit_fn = self.own_fit
elif self.use_own:
self.predict = self._predict
if self.algorithm == "grid" and self.backend != 'optuna':
fit_fn = self.grid_search
elif self.algorithm == 'random' and self.backend not in ['optuna', 'hyperopt']:
fit_fn = self.random_search
elif self.backend == 'hyperopt':
fit_fn = self.fmin
elif self.backend == 'optuna':
fit_fn = self.optuna_objective
else:
raise NotImplementedError
else:
raise NotImplementedError(f"""No fit function found for algorithm {self.algorithm}
with backend {self.backend}""")
res = fit_fn(*args, **kwargs)
serialized = self.serialize()
fname = os.path.join(self.opt_path, 'serialized.json')
with open(fname, 'w') as fp:
json.dump(serialized, fp, sort_keys=True, indent=4, cls=JsonEncoder)
return res
def original_para_order(self):
if isinstance(self.param_space, dict):
return list(self.param_space.keys())
elif self.skopt_space() is not None:
names = []
for s in self.skopt_space():
names.append(s.name)
return names
else:
raise NotImplementedError
def dims(self):
# this will be used for gp_minimize
return list(self.param_space)
def model_for_gpmin(self, **kws):
"""
This function can be called in two cases
- The user has made its own objective_fn.
- We make objective_fn using AI4Water and return the error.
In first case, we just return what user has provided.
"""
if callable(self.objective_fn) and not self.use_named_args:
# external function for bayesian but this function does not require named args.
return self.objective_fn
dims = self.dims()
if self.use_named_args:
# external function and this function accepts named args.
@use_named_args(dimensions=dims)
def fitness(**kwargs):
return self.objective_fn(**kwargs, **kws)
return fitness
raise ValueError(f"used named args is {self.use_named_args}")
def own_fit(self, **kws):
"""kws are the keyword arguments to user objective function
by the user
"""
if self.algorithm == "bayes":
minimize_func = gp_minimize
else: # bayes_rf
minimize_func = forest_minimize
kwargs = self.gpmin_args
if 'num_iterations' in kwargs:
kwargs['n_calls'] = kwargs.pop('num_iterations')
try:
search_result = minimize_func(
func=self.model_for_gpmin(**kws),
dimensions=self.dims(),
**kwargs)
except ValueError as e:
if int(''.join(sklearn.__version__.split('.')[1])) > 22:
raise ValueError(f"""
For bayesian optimization, If your sklearn version is above 0.23,
then this error may be related to
https://github.com/kiudee/bayes-skopt/issues/90 .
Try to lower the sklearn version to 0.22 and run again.
{e}
""")
else:
raise ValueError(e)
# the `space` in search_results may not be in same order as originally provided.
space = search_result['space']
if space.__dict__.__len__() > 1:
ordered_sapce = OrderedDict()
for k in self.space().keys():
ordered_sapce[k] = [s for s in space if s.name == k][0]
search_result['space'] = Space(ordered_sapce.values())
self.gpmin_results = search_result
if len(self.results) < 1:
fv = search_result.func_vals
xiters = search_result.x_iters
for idx, y, x in zip(range(len(fv)), fv, xiters):
self.results[idx] = {'y': y, 'x': x}
if self._process_results:
post_process_skopt_results(search_result, self.results,
self.opt_path, rename=True)
if len(search_result.func_vals)<=100 and self.algorithm != "bayes_rf":
save_skopt_results(search_result, self.opt_path)
self.process_results()
if self.eval_on_best:
self.eval_with_best()
return search_result
def save_results(self, results, path:str = None):
"""
saves the hpo results so that they can be loaded
using load_results method.
parameters
----------
results :
hpo results i.e. output of optimizer.fit()
path :
path where to save the results
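Example
-------
a minimal sketch, assuming ``optimizer`` is a HyperOpt instance created
with ``algorithm="bayes"``
>>> results = optimizer.fit()
>>> optimizer.save_results(results)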
"""
assert self.algorithm == "bayes"
if path is None:
path = self.opt_path
save_skopt_results(results, path)
return
def eval_sequence(self, params, **kwargs):
""""
kwargs :
any additional keyword arguments for objective_fn
"""
if self.verbosity > 0:
print(f"total number of iterations: {len(params)}")
for idx, para in enumerate(params):
if self.use_named_args: # objective_fn is external but uses kwargs
err = self.objective_fn(**para, **kwargs)
else: # objective_fn is external and does not uses keywork arguments
try:
err = self.objective_fn(*list(para.values()), **kwargs)
except TypeError:
raise TypeError(f"""
use_named_args argument is set to {self.use_named_args}. If your
objective function takes key word arguments, make sure that
this argument is set to True during initialization of HyperOpt.""")
err = round(err, 8)
self.results[idx] = {'y':err, 'x':sort_x_iters(para, self.original_para_order())}
if self._process_results:
clear_weights(self.opt_path, self.results, rename=True)
self.process_results()
if self.eval_on_best:
self.eval_with_best()
return self.results
def grid_search(self, **kwargs):
params = list(ParameterGrid(self.param_space))
self.param_grid = params
return self.eval_sequence(params, **kwargs)
def random_search(self, **kwargs):
"""
performs random search by evaluating the objective function over a randomly sampled grid of parameters.
parameters
----------
kwargs :
keyword arguments in the user defined objective function.
"""
for k, v in self.param_space.items():
if v is None:
grid = self.space()[k].grid
assert grid is not None, f"""grid for parameter {k} could not be created. Inferred grid is
{grid}. Please either provide the `num_samples` parameter while creating space or explicitly
provide grid for {k}"""
param_list = list(ParameterSampler(self.param_space, n_iter=self.num_iterations,
random_state=self.random_state))
if len(param_list) < self.num_iterations:
# we need to correct it so that num_iterations gets calculated correctly next time
self.gpmin_args['n_calls'] = len(param_list)
self.gpmin_args['n_iter'] = len(param_list)
self.param_grid = param_list
return self.eval_sequence(param_list, **kwargs)
def optuna_objective(self, **kwargs):
"""
creates and runs an optuna study, wrapping the user defined objective function.
parameters
----------
kwargs :
keyword arguments in the user defined objective function.
"""
if self.verbosity == 0:
optuna.logging.set_verbosity(optuna.logging.ERROR)
sampler = {
'tpe': optuna.samplers.TPESampler,
'cmaes': optuna.samplers.CmaEsSampler,
'random': optuna.samplers.RandomSampler,
'grid': optuna.samplers.GridSampler
}
def objective(trial):
suggestion = {}
for space_name, _space in self.param_space.items():
suggestion[space_name] = _space.suggest(trial)
return self.objective_fn(**suggestion, **kwargs)
if self.algorithm in ['tpe', 'cmaes', 'random']:
study = optuna.create_study(direction='minimize', sampler=sampler[self.algorithm]())
else:
space = {s.name: s.grid for s in self.skopt_space()}
study = optuna.create_study(sampler=sampler[self.algorithm](space))
study.optimize(objective, n_trials=self.num_iterations)
setattr(self, 'study', study)
if self._process_results:
self.process_results()
return study
def fmin(self, **kwargs):
suggest_options = {
'tpe': tpe.suggest,
'random': rand.suggest
}
if atpe is not None:
suggest_options.update({'atpe': atpe.suggest})
trials = Trials()
model_kws = self.gpmin_args
if 'num_iterations' in model_kws:
model_kws['max_evals'] = model_kws.pop('num_iterations')
space = self.hp_space()
if self.use_named_args:
def objective_fn(kws):
# the objective function in hyperopt library receives a dictionary
return self.objective_fn(**kws)
objective_f = objective_fn
else:
objective_f = self.objective_fn
if len(self.space()) > 1:
space = list(self.hp_space().values())
elif len(self.space()) == 1:
space = list(self.hp_space().values())[0]
else:
raise NotImplementedError
best = fmin_hyperopt(objective_f,
space=space,
algo=suggest_options[self.algorithm],
trials=trials,
**kwargs,
**model_kws)
with open(os.path.join(self.opt_path, 'trials.json'), "w") as fp:
json.dump(jsonize(trials.trials), fp, sort_keys=True, indent=4, cls=JsonEncoder)
setattr(self, 'trials', trials)
# self.results = trials.results
if self._process_results:
self.process_results()
return best
def _predict(self, *args, **params):
if self.use_named_args:
return self.objective_fn(**params)
if callable(self.objective_fn) and not self.use_named_args:
return self.objective_fn(*args)
def hp_space(self) -> dict:
"""returns a dictionary whose values are hyperopt equivalent space instances."""
return {k: v.as_hp(False if self.algorithm == 'atpe' else True) for k,v in self.space().items()}
def xy_of_iterations(self) -> Dict[int,Dict[str, Union[str, dict]]]:
"""returns a dictionary whose keys are iteration numbers are values are xy parirs
at those iterations.
Returns
Dict[int, Dict[str, [dict,float]]]
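Example
-------
an illustrative (not actual) return value::
    {0: {'x': {'booster': 'gbtree', 'n_estimators': 1200}, 'y': 0.45},
     1: {'x': {'booster': 'dart', 'n_estimators': 1800}, 'y': 0.50}}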
"""
if self.backend == "optuna":
num_iters = range(self.num_iterations)
results = {}
for idx, trial in zip(num_iters, self.study.trials):
results[idx] = {'y': trial.value, 'x': trial.params}
return results
elif self.backend == "hyperopt":
return x_iter_for_tpe(self.trials, self.hp_space(), as_list=False)
elif self.backend == 'skopt':
if self.use_skopt_bayes:
fv = self.optfn.cv_results_['mean_test_score']
xiters = self.optfn.cv_results_['params']
else:
assert self.gpmin_results is not None, f"gpmin_results is not populated yet"
fv = self.gpmin_results['func_vals']
xiters = self.gpmin_results['x_iters']
results = {}
for idx, y, x in zip(range(len(fv)), fv, xiters):
results[idx] = {'y': y, 'x': self.to_kw(x)}
return results
else:
# for sklearn based
return self.results
def func_vals(self)->np.ndarray:
"""returns the value of objective function at each iteration."""
if self.backend == 'hyperopt':
return np.array([self.trials.results[i]['loss'] for i in range(self.num_iterations)])
elif self.backend == 'optuna':
return np.array([s.values for s in self.study.trials])
elif self.use_skopt_bayes or self.use_sklearn:
return self.optfn.cv_results_['mean_test_score']
else:
return np.array([v['y'] for v in self.results.values()])
def skopt_results(self):
class OptimizeResult:
x_iters = [list(s['x'].values()) for s in self.xy_of_iterations().values()]
func_vals = self.func_vals()
space = self.skopt_space()
if isinstance(self.best_paras(), list):
x = self.best_paras()
elif isinstance(self.best_paras(), dict):
x = list(self.best_paras().values())
else:
raise NotImplementedError
return OptimizeResult()
def best_iter(self)->int:
"""returns the iteration on which best/optimized parameters are obtained.
The indexing starts from 0.
"""
return np.nanargmin(self.func_vals()).item()
def best_xy(self) -> dict:
"""Returns best (optimized) parameters as dictionary.
The dictionary has two keys ``x`` and ``y``. ``x`` is the
best hyperparameters while `y` is the corresponding objective function value.
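For example (illustrative; actual values depend on the optimization run):
>>> opt.best_xy()  # doctest: +SKIP
{'x': {'lr': 0.001, 'units': 32}, 'y': 0.42}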
"""
d = self.xy_of_iterations()
key = list(d.keys())[self.best_iter()]
return d[key]
def _plot_edf(self, save=True, show=False, **kwargs):
"""empirical CDF of objective function"""
plt.close("all")
y = np.array([v['y'] for v in self.xy_of_iterations().values()]).astype("float64")
edf_plot(y, show=show, **kwargs)
if save:
plt.savefig(os.path.join(self.opt_path, "edf"))
return
def plot_parallel_coords(self, save=True, show=False, **kwargs):
""" parallel coordinates of hyperparameters
Parameters
-----------
save : bool, default=True
show : bool, default=False
**kwargs :
any keyword arguments for easy_mpl.parallel_coordinates
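Example
-------
>>> # illustrative; ``opt`` is an already fitted HyperOpt instance
>>> opt.plot_parallel_coords(show=False)  # doctest: +SKIP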
"""
d = self.xy_of_iterations()
data = pd.DataFrame([list(v['x'].values()) for v in d.values()],
columns=[s for s in self.space()])
categories = np.array(list(self.xy_of_iterations().keys())).astype("float64")
_kws = dict(coord_title_kws=dict(rotation=10, fontsize=12))
if kwargs is not None:
_kws.update(kwargs)
parallel_coordinates(
data=data,
categories=categories,
title="Hyperparameters",
show=False,
**_kws
)
if save:
fname = os.path.join(self.opt_path, "parallel_coordinates")
plt.savefig(fname, dpi=500, bbox_inches="tight")
if show:
plt.show()
return
def _plot_evaluations(self, save=True):
plt.close('all')
plot_evaluations(self.skopt_results(), dimensions=self.best_paras(as_list=True))
if save:
plt.savefig(os.path.join(self.opt_path, "evaluations.png"),
dpi=300,
bbox_inches='tight')
return
def _plot_convergence(self,
original:bool=False,
ax = None,
save=True,
show=False,
**kwargs):
plt.close('all')
if original:
ax = easy_mpl.plot(self.func_vals(),
marker=".",
markersize= 12,
lw= 2,
ax_kws=dict(xlabel="Number of calls $n$",
ylabel=r"$\min f(x)$ after $n$ calls",
grid=True),
show=False,
**kwargs)
else:
ax = plot_convergence(self.func_vals(), ax=ax, show=False, **kwargs)
if save:
fname = os.path.join(self.opt_path, "convergence.png")
plt.savefig(fname, dpi=300, bbox_inches='tight')
if show:
plt.show()
return ax
def process_results(self, show=False):
"""post processing of results"""
self.save_iterations_as_xy()
self.plot_parallel_coords()
# deep learning related results
if self.objective_fn_is_dl:
plot_convergences(
self.opt_path,
what='val_loss',
ylabel='Validation MSE')
plot_convergences(
self.opt_path,
what='loss',
ylabel='MSE',
leg_pos="upper right")
self._plot_edf()
# distributions/histograms of explored hyperparameters
self._plot_distributions(show=show)
# convergence plot,
#if sr.x_iters is not None and self.backend != "skopt": # todo
self._plot_convergence(show=show)
# plot of hyperparameter space as explored by the optimizer
if self.backend != 'skopt' and len(self.space()) < 20 and skopt is not None:
self._plot_evaluations()
if len(self.best_paras(True))>1:
plt.close('all')
try:
self.plot_importance()
plt.close('all')
self.plot_importance(plot_type="bar", show=show)
except (RuntimeError, AttributeError):
warnings.warn(f"Error encountered during fanova calculation")
if self.backend == 'hyperopt':
loss_histogram([y for y in self.trials.losses()],
save=True,
fname=os.path.join(self.opt_path, "loss_histogram.png")
)
plot_hyperparameters(
self.trials,
fname=os.path.join(self.opt_path, "hyperparameters.png"),
save=True)
if plotly is not None:
if self.backend == 'optuna':
fig = plot_contour(self.study)
plotly.offline.plot(fig, filename=os.path.join(self.opt_path, 'contours.html'),
auto_open=False)
return
def plot_importance(
self,
save=True,
show:bool=False,
plot_type="box",
with_optuna:bool = False,
**tree_kws
)->plt.Axes:
"""plots hyperparameter importance using fANOVA"""
if with_optuna:
return self._calc_importance_with_optuna(plot_type, save=save, show=show)
X = pd.DataFrame([list(iter_xy['x'].values()) for iter_xy in self.xy_of_iterations().values()])
Y = np.array([iter_xy['y'] for iter_xy in self.xy_of_iterations().values()])
X.columns = list(self.xy_of_iterations()[0]['x'].keys())
dtypes = [space.__class__.__name__ for space in self.skopt_space()]
bounds = [(space.low, space.high) if isinstance(space, (Real, Integer)) else None for space in self.skopt_space()]
kws = {'X': X, 'Y': Y, 'dtypes': dtypes, 'bounds': bounds}
kws.update(tree_kws)
if plot_type == "bar":
try:
importance = fANOVA(**kws).feature_importance()
except (AttributeError, RuntimeError):
raise ValueError(f"Error encountered during fANOVA, try setting `with_optuna` to True")
ax = self._plot_importance_as_barchart(importance, save=save)
else:
try:
mean, std = fANOVA(**kws).feature_importance(return_raw=True)
except (AttributeError, RuntimeError):
raise ValueError(f"Error encountered during fANOVA, try setting `with_optuna` to True")
ax = self._plot_importance_as_boxplot(mean, std, save)
if show:
plt.show()
return ax
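# Usage sketch (illustrative; ``opt`` denotes an already fitted HyperOpt instance):
#
# >>> opt.plot_importance(plot_type="bar", show=False)    # built-in fANOVA implementation
# >>> opt.plot_importance(with_optuna=True, show=False)   # delegate the calculation to optuna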
def _plot_importance_as_boxplot(self, mean, std, save:bool=False):
df = pd.DataFrame([mean, std])
plt.close('all')
ax = df.boxplot(rot=70, return_type="axes")
ax.set_ylabel("Relative Importance")
if save:
plt.savefig(os.path.join(
self.opt_path,
"fanova_importance_hist.png"),
dpi=300,
bbox_inches='tight')
fname = "fanova_importances_raw.json"
with open(os.path.join(self.opt_path, fname), 'w') as fp:
json.dump(jsonize(df.to_dict()), fp, indent=4, sort_keys=True)
return ax
def _plot_importance_as_barchart(self, importance, save=False):
df = pd.DataFrame.from_dict(importance, orient='index')
ax = bar_chart(df, orient='h', show=False,
ax_kws={'title': "fANOVA hyperparameter importance",
'xlabel': "Relative Importance"})
fname = "fanova_importances.json"
if save:
plt.savefig(os.path.join(self.opt_path, 'fanova_importance_bar.png'),
bbox_inches="tight", dpi=300)
with open(os.path.join(self.opt_path, fname), 'w') as fp:
json.dump(jsonize(df.to_dict()), fp, indent=4, sort_keys=True)
return ax
def _calc_importance_with_optuna(self, plot_type="bar", save=False, show=True):
# todo, it is plotting both bar_chart and boxplot on same axes
from ._optuna_fanova import plot_param_importances
importances, importance_paras, ax = plot_param_importances(self.optuna_study())
if plot_type == "bar":
if save:
plt.savefig(os.path.join(self.opt_path, 'fanova_importance_bar.png'),
bbox_inches="tight", dpi=300)
else:
plt.close('all') # because bar chart has already been drawn
df = pd.DataFrame.from_dict(importance_paras)
ax = df.boxplot(rot=70, return_type="axes")
ax.set_ylabel("Relative Importance")
if save:
plt.savefig(os.path.join(
self.opt_path,
"fanova_importance_hist.png"),
dpi=300,
bbox_inches='tight')
with open(os.path.join(self.opt_path, "importances.json"), 'w') as fp:
json.dump(importances, fp, indent=4, sort_keys=True, cls=JsonEncoder)
with open(os.path.join(self.opt_path, "fanova_importances.json"), 'w') as fp:
json.dump(importance_paras, fp, indent=4, sort_keys=True, cls=JsonEncoder)
if show:
plt.show()
return ax
def optuna_study(self):
"""
Attempts to create an optuna Study instance so that
optuna based plots can be generated.
Returns
None if not possible, otherwise a Study instance
"""
from optuna.study import Study
from optuna.trial import TrialState
if self.backend == 'optuna':
return self.study
class _Trial:
state = TrialState.COMPLETE
def __init__(self,
number:int,
values:Union[list, int, float],
params:dict,
distributions:dict):
values = jsonize(values)
self._number = number
self._values = values
if isinstance(values, list):
assert len(values) == 1
self.value = values[0]
elif isinstance(values, float) or isinstance(values, int):
self.value = values
else:
try: # try to convert it to float if possible
self.value = float(values)
except Exception as e:
raise NotImplementedError(f"""
values must be convertible to float but it is {values} of type
{values.__class__.__name__} Actual error message was {e}""")
self.params = params
self._distributions = distributions
self.distributions = distributions
XY_OF_ITERATIONS = self.xy_of_iterations()
SPACE = self.space()
BEST_PARAS = self.best_paras()
class _Study(Study):
trials = []
idx = 0
distributions = {sn: s.to_optuna() for sn, s in SPACE.items()}
for xy in XY_OF_ITERATIONS.values():
_x, _y = xy['x'], xy['y']
assert isinstance(_x, dict), f"""
params must be of type dict but provided params are of type
{_x.__class__.__name__}"""
trials.append(_Trial(number=idx,
values=_y,
params=_x,
distributions=distributions
))
idx += 1
best_params = BEST_PARAS
best_trial = None
best_value = None
_study_id = 0
_distributions = distributions
def __init__(self):
pass
def _is_multi_objective(self):
return False
study = _Study()
setattr(self, 'study', study)
return study
def _plot_distributions(self, save=True, show=True, figsize=None)->plt.Figure:
"""plot distributions of explored hyperparameters"""
# name of hyperparameters
h_paras = list(self.best_xy()['x'].keys())
# container with a list for each hyperparameter
h_para_lists = {k: [] for k in h_paras}
for xy in self.xy_of_iterations().values():
#score = xy['y']
x_iter = xy['x']
for para, val in x_iter.items():
h_para_lists[para].append(val)
figsize = figsize or (6+len(h_paras), 6+len(h_paras))
fig, axes = create_subplots(naxes=len(h_paras),
figsize=figsize)
if not isinstance(axes, np.ndarray):
axes = np.array([axes])
for ax, col in zip(axes.flat, h_paras):
labels, bins = np.unique(np.array(h_para_lists[col]), return_counts=True)
if isinstance(self.space()[col], Real):
labels = [round(label, 3) for label in labels]
bar_chart(bins, labels, orient="v", ax=ax, rotation=90, label=col,
show=False)
ax.set_ylabel("Number of iterations")
ax.legend()
if save:
fname = os.path.join(self.opt_path, "distributions.png")
plt.savefig(fname, bbox_inches="tight")
if show:
plt.show()
return fig
def to_kw(self, x):
names = []
if isinstance(self.space(), dict):
for key in self.space().keys():
names.append(key)
else:
raise NotImplementedError
xkv = {}
if names is not None:
for name, val in zip(names, x):
xkv[name] = val
else:
xkv = x
return xkv
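# Illustrative sketch: if the optimization space is e.g. {'lr': ..., 'units': ...}
# then ``self.to_kw([0.001, 32])`` returns ``{'lr': 0.001, 'units': 32}``.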
def eval_with_best(self):
"""
Finds the best parameters and evaluates the objective_fn with them.
"""
if self.use_named_args:
x = self.best_paras()
else:
x = self.best_paras(True)
if self.use_named_args:
return self.objective_fn(**x)
if callable(self.objective_fn) and not self.use_named_args:
if isinstance(x, list) and self.backend == 'hyperopt': # when x = [x]
if len(x) == 1:
x = x[0]
return self.objective_fn(x)
raise NotImplementedError
@classmethod
def from_gp_parameters(cls, fpath: str, objective_fn):
"""loads results saved from bayesian optimization"""
opt_path = os.path.dirname(fpath)
with open(fpath, 'r') as fp:
gpmin_results = json.load(fp)
space = gpmin_results['space']
spaces = []
for sp_name, sp_paras in space.items():
if sp_paras['type'] == 'Categorical':
spaces.append(Categorical(sp_paras['categories'], name=sp_name))
elif sp_paras['type'] == 'Integer':
spaces.append(Integer(low=sp_paras['low'], high=sp_paras['high'], name=sp_name, prior=sp_paras['prior']))
elif sp_paras['type'] == 'Real':
spaces.append(Real(low=sp_paras['low'], high=sp_paras['high'], name=sp_name, prior=sp_paras['prior']))
else:
raise NotImplementedError
optimizer = cls('bayes',
param_space=spaces,
objective_fn=objective_fn,
opt_path=opt_path,
backend='skopt')
optimizer.gpmin_results = gpmin_results
return optimizer
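# Usage sketch (illustrative; the path and objective function below are hypothetical):
#
# >>> opt = HyperOpt.from_gp_parameters("path/to/gp_parameters.json", objective_fn=my_objective)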
def pre_calculated_results(self, results, from_gp_parameters=True):
"""Loads the pre-calculated results i.e. x and y values which
have been already evaluated."""
with open(results, 'r') as fp:
results = json.load(fp)
return
def serialize(self):
return {'fun': '',
'x': '',
"best_paras": jsonize(self.best_paras()),
'space': {k: v.serialize() for k, v in self.space().items()},
'fun_vals': self.func_vals(),
# 'iters': self.xy_of_iterations(), # todo, for BayesSearchCVs, not getting ys
'algorithm': self.algorithm,
'backend': self.backend,
'opt_path': self.opt_path
}
def save_iterations_as_xy(self):
iterations = self.xy_of_iterations()
jsonized_iterations = jsonize(iterations)
fname = os.path.join(self.opt_path, "iterations.json")
with open(fname, "w") as fp:
json.dump(jsonized_iterations, fp, sort_keys=False, indent=4, cls=JsonEncoder)
fname = os.path.join(self.opt_path, "iterations_sorted.json")
with open(fname, "w") as fp:
json.dump(dict(sorted(jsonized_iterations.items())), fp, sort_keys=True, indent=4, cls=JsonEncoder)
def add_previous_results(
self,
iterations: Union[dict, str] = None,
x: list = None,
y :list = None
):
"""adds results from previous iterations.
If you have run the optimization previously, you can make use
of those results by appending them.
Arguments:
iterations:
It can be either a dictionary whose keys are y values and values are x
or it can be a path to a file which contains these xy values as a dictionary.
x:
a list of lists where each sub-list is the value of hyperparameter
at one iteration. The `x` and `y` arguments are optional and will
only be used if `iterations` are not provided.
y:
a list of float values where each value in y is the output
of objective_fn with corresponding x. The length of `x` and `y`
must be equal.
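Example
-------
>>> # illustrative only; x and y are hypothetical values from an earlier run
>>> optimizer.add_previous_results(x=[[0.001, 32], [0.01, 64]], y=[0.42, 0.37])  # doctest: +SKIP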
"""
assert self.algorithm in ["bayes", "bayes_rf"]
if iterations is None:
assert isinstance(x, list) and isinstance(y, list)
assert len(x) == len(y), f"x has {len(x)} values while y has {len(y)} values. They must be equal"
x0 = x
y0 = y
elif isinstance(iterations, str):
assert os.path.exists(iterations), f"the path {iterations} does not exist"
# it is a path
with open(iterations, 'r') as fp:
iter_dict = json.load(fp)
x0, y0 = self.dict_to_xy(iter_dict)
else:
if not isinstance(iterations, dict):
raise ValueError(f"iterations must be a dictionary but it is of type {iterations.__class__.__name__}")
x0, y0 = self.dict_to_xy(iterations)
# todo check for inf and nan in y0
self.gpmin_args['x0'] = x0
self.gpmin_args['y0'] = y0
return
@staticmethod
def dict_to_xy(iterations:dict):
x0, y0 = [], []
for y, x in iterations.items():
y0.append(float(y))
x0.append(list(x.values()))
return x0, y0
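# Illustrative sketch: ``dict_to_xy({"0.42": {"lr": 0.001}, "0.37": {"lr": 0.01}})``
# returns ``([[0.001], [0.01]], [0.42, 0.37])``, i.e. (x0, y0).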
def load_results(self, fname:str):
"""loads the previously computed results. It should not
be used after .fit()
parameters
----------
fname : str
complete path of hpo_results.bin file e.g.
path/to/hpo_results.bin
"""
from joblib import load # some modules may not be dependent upon joblib
assert len(self.results) == 0, f"""
Loading results after call to .fit is not allowed.
Create a new instance of HyperOpt and then call this function.
"""
if not os.path.exists(fname):
raise FileNotFoundError(f" File {fname} does not exist")
new_results = load(fname)
self.gpmin_results = new_results
fv = new_results.func_vals
xiters = new_results.x_iters
for idx, y, x in zip(range(len(fv)), fv, xiters):
self.results[idx] = {'y': y, 'x': x}
return | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/hyperopt/_main.py | _main.py |
from typing import Union
from ai4water.utils.utils import jsonize
from ai4water.backend import np, skopt, optuna
from ai4water.backend import hp
if optuna is not None:
CategoricalDistribution = optuna.distributions.CategoricalDistribution
UniformDistribution = optuna.distributions.UniformDistribution
IntLogUniformDistribution = optuna.distributions.IntLogUniformDistribution
IntUniformDistribution = optuna.distributions.IntUniformDistribution
LogUniformDistribution = optuna.distributions.LogUniformDistribution
else:
CategoricalDistribution = None
UniformDistribution = None
IntLogUniformDistribution = None
IntUniformDistribution = None
LogUniformDistribution = None
# helper class to be able to print [1, ..., 4] instead of [1, '...', 4]
class _Ellipsis:
def __repr__(self):
return '...'
if skopt is None:
class Dimension(object):
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if isinstance(value, str) or value is None:
self._name = value
else:
raise ValueError("Dimension's name must be either string or None.")
else:
from skopt.space import Dimension
if skopt is None:
class _Real(Dimension):
def __init__(self, low, high, prior="uniform", base=10, transform=None,
name=None, dtype=float):
if high <= low:
raise ValueError("the lower bound {} has to be less than the"
" upper bound {}".format(low, high))
if prior not in ["uniform", "log-uniform"]:
raise ValueError("prior should be 'uniform' or 'log-uniform'"
" got {}".format(prior))
self.low = low
self.high = high
self.prior = prior
self.base = base
self.log_base = np.log10(base)
self.name = name
self.dtype = dtype
self._rvs = None
self.transformer = None
self.transform_ = transform
class _Integer(Dimension):
def __init__(self, low, high, prior="uniform", base=10, transform=None,
name=None, dtype=np.int64):
if high <= low:
raise ValueError("the lower bound {} has to be less than the"
" upper bound {}".format(low, high))
if prior not in ["uniform", "log-uniform"]:
raise ValueError("prior should be 'uniform' or 'log-uniform'"
" got {}".format(prior))
self.low = low
self.high = high
self.prior = prior
self.base = base
self.log_base = np.log10(base)
self.name = name
self.dtype = dtype
self.transform_ = transform
class _Categorical(Dimension):
def __init__(self, categories, prior=None, transform=None, name=None):
self.categories = tuple(categories)
self.name = name
self.prior = prior
if prior is None:
self.prior_ = np.tile(1. / len(self.categories),
len(self.categories))
else:
self.prior_ = prior
else:
_Real = skopt.space.Real
_Integer = skopt.space.Integer
_Categorical = skopt.space.Categorical
class Real(_Real):
"""
This class is used for the parameters which have fractional values
such as real values from 1.0 to 3.5.
This class extends the `Real` class of Skopt so that it has an attribute grid which then
can be fed to optimization algorithm to create grid space. It also adds several further
methods to it.
Attributes
------------
grid
Methods
----------
- as_hp
- to_optuna
- suggest
- serialize
Example
-------
>>> from ai4water.hyperopt import Real
>>> lr = Real(low=0.0005, high=0.01, prior='log-uniform', name='lr')
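... # a grid can also be attached, e.g. for grid based search (illustrative sketch)
>>> frac = Real(low=0.1, high=0.9, num_samples=5, name='frac')
>>> grid = frac.grid  # array of 5 evenly spaced values between 0.1 and 0.9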
"""
counter = 0
def __init__(self,
low: float = None,
high: float = None,
num_samples: int = None,
step: int = None,
grid: Union[list, np.ndarray] = None,
*args,
**kwargs
):
"""
Arguments:
low : lower limit of parameter
high : upper limit of parameter
step : used to define `grid` in conjunction with `low` and `high`
This argument is only used when grid search algorithm is used.
grid : array like, if given, `low`, `high`, `step` and `num_samples`
will be redundant.
num_samples : if given, it will be used to create grid space using the formula
``np.linspace(low, high, num_samples)``
"""
if low is None:
assert grid is not None
assert hasattr(grid, '__len__')
low = np.min(grid)
high = np.max(grid)
self.counter += 1
if 'name' not in kwargs:
kwargs['name'] = f'real_{self.counter}'
if skopt is not None:
kwargs = check_prior(kwargs)
self.num_samples = num_samples
self.step = step
super().__init__(low=low, high=high, *args, **kwargs)
self.grid = grid
@property
def grid(self):
return self._grid
@grid.setter
def grid(self, x):
if x is None:
if self.num_samples:
self._grid = np.linspace(self.low, self.high, self.num_samples)
elif self.step:
self._grid = np.arange(self.low, self.high, self.step)
else:
self._grid = None
else:
self._grid = np.array(x)
def as_hp(self, as_named_args=True):
if self.prior == 'log-uniform':
return hp.loguniform(self.name, low=self.low, high=self.high)
else:
assert self.prior in ['uniform', 'loguniform', 'normal', 'lognormal',
'quniform', 'qloguniform', 'qnormal', 'qlognormal']
if as_named_args:
return getattr(hp, self.prior)(label=self.name, low=self.low, high=self.high)
else:
return getattr(hp, self.prior)(self.name, self.low, self.high)
def suggest(self, _trial):
# creates optuna trial
log = False
if self.prior:
if self.prior == 'log':
log = True
return _trial.suggest_float(name=self.name,
low=self.low,
high=self.high,
step=self.step, # default step is None
log=log)
def to_optuna(self):
"""returns an equivalent optuna space"""
if self.prior != 'log':
return UniformDistribution(low=self.low, high=self.high)
else:
return LogUniformDistribution(low=self.low, high=self.high)
def serialize(self):
"""Serializes the `Real` object so that it can be saved in json"""
_raum = {k: jsonize(v) for k, v in self.__dict__.items() if not callable(v)}
_raum.update({'type': 'Real'})
return _raum
def __repr__(self):
return f"Real(low={self.low}, high={self.high}," \
f" prior='{self.prior}', transform='{self.transform_}' name='{self.name}')"
class Integer(_Integer):
"""
This class is used when the parameter is integer such as integer values from 1 to 10.
Extends the Real class of Skopt so that it has an attribute grid which then
can be fed to optimization algorithm to create grid space. Moreover it also
generates optuna and hyperopt compatible/equivalent instances.
Attributes
------------
grid
Methods
----------
- as_hp
- to_optuna
- suggest
- serialize
Example:
>>> from ai4water.hyperopt import Integer
>>> units = Integer(low=16, high=128, name='units')
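... # a step can be given to build an evenly spaced grid (illustrative sketch)
>>> filters = Integer(low=16, high=128, step=16, name='filters')
>>> grid = filters.grid  # [16, 32, 48, 64, 80, 96, 112]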
"""
counter = 0
def __init__(self,
low: int = None,
high: int = None,
num_samples: int = None,
step: int = None,
grid: "np.ndarray, list" = None,
*args,
**kwargs
):
"""
Arguments:
low : lower limit of parameter
high : upper limit of parameter
grid list/array: If given, `low` and `high` should not be given as they will be
calculated from this grid.
step int: if given, it will be used to calculate the grid using the formula
np.arange(low, high, step)
num_samples int: if given, it will be used to create grid space using the formula
np.linspace(low, high, num_samples)
"""
if low is None:
assert grid is not None
assert hasattr(grid, '__len__')
low = np.min(grid)
high = np.max(grid)
self.counter += 1
if 'name' not in kwargs:
kwargs['name'] = f'integer_{self.counter}'
self.num_samples = num_samples
self.step = step
if skopt is not None:
kwargs = check_prior(kwargs)
super().__init__(low=low, high=high, *args, **kwargs)
self.grid = grid
@property
def grid(self):
return self._grid
@grid.setter
def grid(self, x):
if x is None:
if self.num_samples:
__grid = np.linspace(self.low, self.high, self.num_samples, dtype=np.int32)
self._grid = [int(val) for val in __grid]
elif self.step:
__grid = np.arange(self.low, self.high, self.step, dtype=np.int32)
self._grid = [int(val) for val in __grid]
else:
self._grid = None
else:
assert hasattr(x, '__len__'), f"unacceptable type of grid {x.__class__.__name__}"
self._grid = np.array(x)
def as_hp(self, as_named_args=True):
if as_named_args:
return hp.randint(self.name, low=self.low, high=self.high)
else:
return hp.randint(self.name, self.low, self.high)
def suggest(self, _trial):
# creates optuna trial
log = False
if self.prior:
if self.prior == 'log':
log = True
return _trial.suggest_int(name=self.name,
low=self.low,
high=self.high,
step=self.step if self.step else 1, # default step is 1
log=log)
def to_optuna(self):
"""returns an equivalent optuna space"""
if self.prior != 'log':
return IntUniformDistribution(low=self.low, high=self.high)
else:
return IntLogUniformDistribution(low=self.low, high=self.high)
def serialize(self):
"""Serializes the `Integer` object so that it can be saved in json"""
_raum = {k: jsonize(v) for k, v in self.__dict__.items() if not callable(v)}
_raum.update({'type': 'Integer'})
return _raum
def __repr__(self):
return f"Integer(low={self.low}, high={self.high}," \
f" prior='{self.prior}', transform='{self.transform_}' name='{self.name}')"
class Categorical(_Categorical):
"""
This class is used when parameter has distinct group/class of values such
as [1,2,3] or ['a', 'b', 'c']. This class overrides skopt's `Categorical` class.
It can be converted to optuna's distribution or hyperopt's choice. It uses the
same input arguments as skopt's `Categorical`_ class.
Methods
----------
- as_hp
- to_optuna
- suggest
- serialize
Example
-------
>>> from ai4water.hyperopt import Categorical
>>> activations = Categorical(categories=['relu', 'tanh', 'sigmoid'], name='activations')
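... # the ``grid`` attribute simply returns the given categories (illustrative)
>>> cats = activations.grid  # ('relu', 'tanh', 'sigmoid')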
.. _Categorical:
https://scikit-optimize.github.io/stable/modules/generated/skopt.space.space.Categorical.html
"""
@property
def grid(self):
return self.categories
def as_hp(self, as_named_args=True):
categories = self.categories
if isinstance(categories, tuple):
categories = list(self.categories)
return hp.choice(self.name, categories)
def suggest(self, _trial):
# creates optuna trial
return _trial.suggest_categorical(name=self.name, choices=self.categories)
def to_optuna(self):
return CategoricalDistribution(choices=self.categories)
def serialize(self):
"""Serializes the `Categorical object` so that it can be saved in json"""
_raum = {k: jsonize(v) for k, v in self.__dict__.items() if not callable(v)}
_raum.update({'type': 'Categorical'})
return _raum
def __repr__(self):
if len(self.categories) > 7:
cats = self.categories[:3] + (_Ellipsis(),) + self.categories[-3:]
else:
cats = self.categories
if self.prior is not None and len(self.prior) > 7:
prior = self.prior[:3] + [_Ellipsis()] + self.prior[-3:]
else:
prior = self.prior
return f"Categorical(categories={cats}, prior={prior} name='{self.name}')"
def check_prior(kwargs: dict):
prior = kwargs.get('prior', 'uniform')
if prior in ["log"] and skopt.__version__ in ["0.9.0"]:
# todo see why this will give error
kwargs['prior'] = "log-uniform"
return kwargs | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/hyperopt/_space.py | _space.py |
import json
from itertools import islice
from collections import OrderedDict
try:
from skopt.plots import plot_evaluations, plot_objective
except (ModuleNotFoundError, ImportError):
plot_evaluations, plot_objective = None, None
from ai4water.utils.utils import jsonize, clear_weights
from ai4water.backend import os, np, pd, mpl, plt, skopt, easy_mpl
from ai4water.backend import hyperopt as _hyperopt
from ._space import Categorical, Real, Integer, Dimension
if skopt is None:
pass
else:
Space = skopt.space.space.Space
dump = skopt.utils.dump
if _hyperopt is not None:
space_eval = _hyperopt.space_eval
hp = _hyperopt.hp
miscs_to_idxs_vals = _hyperopt.base.miscs_to_idxs_vals
else:
space_eval, hp = None, None
miscs_to_idxs_vals = None
plot = easy_mpl.plot
def is_choice(space):
"""checks if an hp.space is hp.choice or not"""
if hasattr(space, 'name') and 'switch' in space.name:
return True
return False
def x_iter_for_tpe(trials, param_space: dict, as_list=True):
assert isinstance(param_space, dict)
x_iters = [] # todo, remove x_iters, it is just values of iterations
iterations = {}
for idx, t in enumerate(trials.trials):
vals = t['misc']['vals']
y = t['result']['loss']
x = get_one_tpe_x_iter(vals, param_space)
iterations[idx] = {'x': x, 'y': y}
x_iters.append(x)
if as_list:
return [list(d.values()) for d in x_iters]
return iterations
def get_one_tpe_x_iter(tpe_vals, param_space: dict, sort=True):
x_iter = {}
for para, para_val in tpe_vals.items():
if is_choice(param_space[para]):
hp_assign = {para: para_val[0]}
cval = space_eval(param_space[para], hp_assign)
x_iter[para] = cval
else:
x_iter[para] = para_val[0]
if sort:
x_iter = sort_x_iters(x_iter, list(param_space.keys()))
return x_iter
def sort_x_iters(x_iter: dict, original_order: list):
# the values in x_iter may not be sorted as the parameters provided in original order
new_x_iter = {}
for s in original_order:
new_x_iter[s] = x_iter[s]
return new_x_iter
def skopt_space_from_hp_spaces(hp_space: dict) -> list:
"""given a dictionary of hp spaces where keys are names, this function
converts it into skopt space."""
new_spaces = []
for k, s in hp_space.items():
new_spaces.append(skopt_space_from_hp_space(s, k))
return new_spaces
def skopt_space_from_hp_space(hp_space, prior_name=None):
"""Converts on hp space into a corresponding skopt space."""
if is_choice(hp_space):
skopt_space = to_categorical(hp_space, prior_name=prior_name)
elif any([i in hp_space.__str__() for i in ['loguniform', 'quniform', 'qloguniform', 'uniform']]):
skopt_space = to_real(hp_space, prior_name=prior_name)
elif 'randint' in hp_space.__str__():
skopt_space = to_int(hp_space, prior_name=prior_name)
elif hasattr(hp_space, 'dist') and hp_space.dist.name in ['uniform', 'loguniform', 'quniform', 'qloguniform']:
skopt_space = to_real(hp_space, prior_name=prior_name)
else:
raise NotImplementedError
return skopt_space
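# Illustrative sketch (assumes hyperopt is installed; names below are examples only):
#
# >>> from hyperopt import hp
# >>> skopt_space_from_hp_space(hp.uniform('lr', 0.001, 0.1))
# which returns a ``Real`` dimension with low=0.001, high=0.1 and name='lr'.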
def to_categorical(hp_space, prior_name=None):
"""converts an hp space into a Dimension object."""
cats = []
inferred_name = None
for arg in hp_space.pos_args:
if hasattr(arg, '_obj') and arg.pure:
cats.append(arg._obj)
elif arg.name == 'hyperopt_param' and len(arg.pos_args)>0:
for a in arg.pos_args:
if a.name == 'literal' and a.pure:
inferred_name = a._obj
prior_name = verify_name(prior_name, inferred_name)
if len(cats) == 0:
raise NotImplementedError
return Categorical(categories=cats, name=prior_name)
def verify_name(prior_name, inferred_name):
"""Verfies that the given/prior name matches with the inferred name.
"""
if prior_name is None:
prior_name = inferred_name
else:
assert prior_name == inferred_name, f"""given name {prior_name} does not match with
inferred name {inferred_name}"""
return prior_name
def to_int(_space, prior_name=None):
"""converts an hp.randint into a Dimension object"""
inferred_name = None
limits = None
for arg in _space.pos_args:
if arg.name == 'literal' and len(arg.named_args) == 0:
inferred_name = arg._obj
elif len(arg.named_args) == 2 and arg.name == 'randint':
limits = {}
for a in arg.named_args:
limits[a[0]] = a[1]._obj
elif len(arg.pos_args) == 2 and arg.name == 'randint':
# high and low are not named args
_limits = []
for a in arg.pos_args:
_limits.append(a._obj)
limits = {'high': np.max(_limits), 'low': np.min(_limits)}
else:
raise NotImplementedError
prior_name = verify_name(prior_name, inferred_name)
if limits is None:
raise NotImplementedError
return Integer(low=limits['low'], high=limits['high'], name=prior_name)
def to_real(hp_space, prior_name=None):
"""converts an an hp space to real. """
inferred_name = None
limits = None
prior = None
allowed_names = ['uniform', 'loguniform', 'quniform', 'qloguniform']
for arg in hp_space.pos_args:
if len(arg.pos_args) > 0:
for a in arg.pos_args:
if a.name == 'literal' and len(arg.named_args) == 0:
inferred_name = a._obj
elif a.name in allowed_names and len(a.named_args) == 2:
prior = a.name
limits = {}
for _a in a.named_args:
limits[_a[0]] = _a[1]._obj
elif a.name in allowed_names and len(a.pos_args) == 2:
prior = a.name
_limits = []
for _a in a.pos_args:
_limits.append(_a._obj)
limits = {'high': np.max(_limits), 'low': np.min(_limits)}
prior_name = verify_name(prior_name, inferred_name)
# prior must be inferred because hp spaces always have prior
if limits is None or prior is None or 'high' not in limits:
raise NotImplementedError
return Real(low=limits['low'], high=limits['high'], prior=prior, name=prior_name)
def loss_histogram(losses, # array like
xlabel='objective_fn',
ylabel='Frequency',
save=True,
fname="histogram.png"):
plt.hist(losses)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if save:
plt.savefig(fname, dpi=300, bbox_inches='tight')
else:
plt.show()
def plot_hyperparameters(
trials,
save=True,
fontsize=10,
colorize_best=None,
columns=5,
arrange_by_loss=False,
fname='parameters_selection_plot.png'
):
"""Copying from hyperopt because original hyperopt does not allow saving the plot."""
idxs, vals = miscs_to_idxs_vals(trials.miscs)
losses = trials.losses()
finite_losses = [y for y in losses if y not in (None, float("inf"))]
asrt = np.argsort(finite_losses)
if colorize_best is not None:
colorize_thresh = finite_losses[asrt[colorize_best + 1]]
else:
# -- set to lower than best (disabled)
colorize_thresh = finite_losses[asrt[0]] - 1
loss_min = min(finite_losses)
loss_max = max(finite_losses)
print("finite loss range", loss_min, loss_max, colorize_thresh)
loss_by_tid = dict(zip(trials.tids, losses))
def color_fn_bw(lossval):
if lossval in (None, float("inf")):
return 1, 1, 1
else:
t = (lossval - loss_min) / (loss_max - loss_min + 0.0001)
if lossval < colorize_thresh:
return 0.0, 1.0 - t, 0.0 # -- green = best, black = worst
else:
return t, t, t # -- white=worst, black=best
all_labels = list(idxs.keys())
titles = all_labels
order = np.argsort(titles)
C = min(columns, len(all_labels))
R = int(np.ceil(len(all_labels) / float(C)))
for plotnum, varnum in enumerate(order):
label = all_labels[varnum]
plt.subplot(R, C, plotnum + 1)
# hide x ticks
ticks_num, _ = plt.xticks()
plt.xticks(ticks_num, [""] * len(ticks_num))
dist_name = label
if arrange_by_loss:
x = [loss_by_tid[ii] for ii in idxs[label]]
else:
x = idxs[label]
if "log" in dist_name:
y = np.log(vals[label])
else:
y = vals[label]
plt.title(titles[varnum], fontsize=fontsize)
c = list(map(color_fn_bw, [loss_by_tid[ii] for ii in idxs[label]]))
if len(y):
plt.scatter(x, y, c=c)
if "log" in dist_name:
nums, _ = plt.yticks()
plt.yticks(nums, ["%.2e" % np.exp(t) for t in nums])
if save:
plt.savefig(fname, dpi=300, bbox_inches='tight')
else:
plt.show()
def post_process_skopt_results(skopt_results, results, opt_path, rename=True):
skopt_plots(skopt_results, pref=opt_path)
clear_weights(results=results, opt_dir=opt_path, rename=rename)
return
def save_skopt_results(skopt_results, opt_path):
fname = os.path.join(opt_path, 'gp_parameters')
sr_res = SerializeSKOptResults(skopt_results)
try:
with open(fname + '.json', 'w') as fp:
json.dump(sr_res.serialized_results, fp, sort_keys=True, indent=4)
except TypeError:
with open(fname + '.json', 'w') as fp:
json.dump(str(sr_res.serialized_results), fp, sort_keys=True, indent=4)
dump(skopt_results, os.path.join(opt_path, 'hpo_results.bin'),
store_objective=False)
return
def _plot_objective(search_results, pref="", threshold=20):
if len(search_results.x) < threshold:
if search_results.space.n_dims == 1:
pass
else:
plt.close('all')
_ = plot_objective(search_results)
plt.savefig(os.path.join(pref, 'objective'), dpi=400, bbox_inches='tight')
return
def skopt_plots(search_result,
pref=os.getcwd(),
threshold=20):
if len(search_result.x) < threshold: # it takes forever if parameters are > 20
plt.close('all')
_ = plot_evaluations(search_result)
plt.savefig(os.path.join(pref, 'evaluations'), dpi=400, bbox_inches='tight')
_plot_objective(search_result, pref=pref, threshold=threshold)
convergence(search_result.func_vals)
plt.savefig(os.path.join(pref, 'convergence'), dpi=300, bbox_inches='tight')
convergence(search_result.func_vals, show_original=True)
plt.savefig(os.path.join(pref, 'convergence_original'), dpi=300, bbox_inches='tight')
return
def convergence(func_vals, color=None,
show_original=False):
_, ax = plt.subplots()
ax.grid()
n_calls = len(func_vals)
mins = [np.min(func_vals[:i])
for i in range(1, n_calls + 1)]
if show_original:
data = func_vals
else:
data = mins
return plot(data,
color=color,
marker=".", markersize=12, lw=2,
ax_kws=dict(title="Convergence plot",
xlabel="Number of calls $n$",
ylabel=r"$\min f(x)$ after $n$ calls"),
show=False,
ax=ax)
class SerializeSKOptResults(object):
"""
This class has two functions
- converts everything in skopt results into python native types so that these results can be saved in readable
json files.
- Stores as many attributes in serialized form as needed so that a skopt `search_result` object can be generated from it
which then can be used to regenerate all hyper-parameter optimization related plots.
skopt_results is a dictionary which contains following keys
x: list, list of parameters being optimized
fun: float, final value of objective function
func_vals: numpy array, of length equal to number of iterations
x_iters: list of lists, the outer list's length equals the number of iterations and each inner list's length equals the number of parameters
being optimized
models: list of models, where a model has following attributes
- noise: str
- kernel: skopt.learning.gaussian_process.kernels.Sum, it has following 2 attributes
- k1: skopt.learning.gaussian_process.kernels.Product, it has following attributes
-k1: skopt.learning.gaussian_process.kernels.ConstantKernel
- constant_value: float
- constant_value_bounds: tuple of floats
-k2: skopt.learning.gaussian_process.kernels.Matern
- length_scale: numpy ndarray
- length_scale_bounds: list of floats
- nu: float
- k2: skopt.learning.gaussian_process.kernels.WhiteKernel
- noise_level: float
- noise_level_bounds: tuple of floats
- alpha: float
- optimizer: str
- n_restarts_optimizer: int
- normalize_y: bool
- copy_X_train: bool
- random_state: int
- kernel_: skopt.learning.gaussian_process.kernels.Sum
- _rng: numpy.random.mtrand.RandomState
- n_features_in_: int
- _y_train_mean: np.float64
- _y_train_std: np.float64
- X_train_: numpy array
- y_train_: numpy array
- log_marginal_likelihood_value_: numpy array
- L_: numpy array
- _K_inv: NoneType
- alpha_: numpy array
- noise_: np.float64
- K_inv_: numpy array
- y_train_std_: np.float64
- y_train_mean_: np.float64
space: skopt.space.space.Space, parameter spaces
random_state: numpy.random.mtrand.RandomState
specs: dict, specs of each iteration. It has following keys
- args: dict, which has following keys
- func: function
- dimensions: skopt.space.space.Space
- base_estimator: skopt.learning.gaussian_process.gpr.GaussianProcessRegressor, which has following attributes
- noise: str
- kernel: skopt.learning.gaussian_process.kernels.Product
- k1: skopt.learning.gaussian_process.kernels.ConstantKernel, which has following attributes
- constant_value: float
- constant_value_bounds: tuple of floats
- k2: skopt.learning.gaussian_process.kernels.Matern, which has following attributes
- length_scale: numpy ndarray
- length_scale_bounds: list of floats
- nu: float
- alpha: float
- optimizer: str
- n_restarts_optimizer: int
- normalize_y: bool
- copy_X_train: bool
- random_state: int
- n_cals: int
- n_random_starts: NoneType
- n_initial_points: str
- initial_point_generator: str
- acq_func: str
- acq_optimizer: str
- x0: list
- y0: NoneType
- random_state: numpy.random.mtrand.RandomState
- verbose: bool
- callback: NoneType
- n_points: int
- n_restarts_optimizer: int
- xi: float
- kappa: float
- n_jobs: int
- model_queue_size: NoneType
- function: str
"""
def __init__(self, results: dict):
self.results = results
self.iters = len(results['func_vals'])
self.paras = len(results['x'])
self.serialized_results = {}
for key in results.keys():
self.serialized_results[key] = getattr(self, key)()
def x(self):
return self.para_list(self.results['x'])
def para_list(self, x):
"""Serializes list of parameters"""
_x = []
for para in x:
_x.append(jsonize(para))
return _x
def x0(self):
_x0 = []
__xo = self.results['specs']['args']['x0']
if __xo is not None:
for para in __xo:
if isinstance(para, list):
_x0.append(self.para_list(para))
else:
_x0.append(jsonize(para))
return _x0
def y0(self):
__y0 = self.results['specs']['args']['y0']
if __y0 is None:
return __y0
if isinstance(__y0, list):
_y0 = []
for y in self.results['specs']['args']['y0']:
_y0.append(jsonize(y))
return _y0
return jsonize(self.results['specs']['args']['y0'])
def fun(self):
return float(self.results['fun'])
def func_vals(self):
return [float(i) for i in self.results['func_vals']]
def x_iters(self):
out_x = []
for i in range(self.iters):
x = []
for para in self.results['x_iters'][i]:
x.append(jsonize(para))
out_x.append(x)
return out_x
def space(self):
raum = {}
for sp in self.results['space'].dimensions:
if sp.__class__.__name__ == 'Categorical':
_raum = {k: jsonize(v) for k, v in sp.__dict__.items() if k in ['categories', 'transform_',
'prior', '_name']}
_raum.update({'type': 'Categorical'})
raum[sp.name] = _raum
elif sp.__class__.__name__ == 'Integer':
_raum = {k: jsonize(v) for k, v in sp.__dict__.items() if
k in ['low', 'transform_', 'prior', '_name', 'high', 'base',
'dtype', 'log_base']}
_raum.update({'type': 'Integer'})
raum[sp.name] = _raum
elif sp.__class__.__name__ == 'Real':
_raum = {k: jsonize(v) for k, v in sp.__dict__.items() if
k in ['low', 'transform_', 'prior', '_name', 'high', 'base', 'dtype',
'log_base']}
_raum.update({'type': 'Real'})
raum[sp.name] = _raum
return raum
def random_state(self):
return str(self.results['random_state'])
def kernel(self, k):
"""Serializes Kernel"""
if k.__class__.__name__ == "Product":
return self.prod_kernel(k)
if k.__class__.__name__ == "Sum":
return self.sum_kernel(k)
# default scenario, just converts it to string
return str(k)
def prod_kernel(self, k):
"""Serializes product kernel"""
kernel = {}
for _k, v in k.__dict__.items():
kernel[_k] = self.singleton_kernel(v)
return {"ProductKernel": kernel}
def sum_kernel(self, k):
"""Serializes sum kernel"""
kernel = {}
for _k, v in k.__dict__.items():
if v.__class__.__name__ == "Product":
kernel[_k] = self.prod_kernel(v)
else:
kernel[_k] = self.singleton_kernel(v)
return {"SumKernel": kernel}
def singleton_kernel(self, k):
"""Serializes Kernels such as Matern, White, Constant Kernels"""
return {k: jsonize(v) for k, v in k.__dict__.items()}
def specs(self):
_specs = {}
_specs['function'] = self.results['specs']['function']
args = {}
args['func'] = str(self.results['specs']['args']['func'])
args['dimensions'] = self.space()
be = self.results['specs']['args']['base_estimator']
b_e = {k: jsonize(v) for k, v in be.__dict__.items() if
k in ['noise', 'alpha', 'optimizer', 'n_restarts_optimizer', 'normalize_y',
'copy_X_train', 'random_state']}
b_e['kernel'] = self.kernel(be.kernel)
args['base_estimator'] = b_e
for k,v in self.results['specs']['args'].items():
if k in ['n_cals', 'n_random_starts', 'n_initial_points', 'initial_point_generator', 'acq_func',
'acq_optimizer', 'verbose', 'callback', 'n_points', 'n_restarts_optimizer', 'xi', 'kappa',
'n_jobs', 'model_queue_size']:
args[k] = jsonize(v)
args['x0'] = self.x0()
args['y0'] = self.y0()
_specs['args'] = args
return _specs
def models(self):
mods = []
for model in self.results['models']:
mod = {k: jsonize(v) for k, v in model.__dict__.items() if k in [
'noise', 'alpha', 'optimizer', 'n_restarts_optimizer', 'normalize_y', 'copy_X_train', 'random_state',
'_rng', 'n_features_in_', '_y_train_mean', '_y_train_std', 'X_train_', 'y_train_',
'log_marginal_likelihood_value_', 'L_', '_K_inv', 'alpha_', 'noise_', 'K_inv_', 'y_train_std_', 'y_train_mean_']}
mod['kernel'] = self.kernel(model.kernel)
mods.append({model.__class__.__name__: mod})
return mods
def to_skopt_space(x):
"""converts the space x into skopt compatible space"""
if isinstance(x, list):
if all([isinstance(s, Dimension) for s in x]):
_space = Space(x)
elif all([s.__class__.__name__== "Apply" for s in x]):
_space = Space([skopt_space_from_hp_space(v) for v in x])
else:
# x consists of one or multiple tuples
assert all([isinstance(obj, tuple) for obj in x])
_space = [make_space(i) for i in x]
elif isinstance(x, dict): # todo, in random, should we build Only Categorical space?
space_ = []
for k, v in x.items():
if isinstance(v, list):
s = space_from_list(v, k)
elif isinstance(v, Dimension):
# it is possible that the user has not specified the name so assign the names
# because we have keys.
if v.name is None or v.name.startswith('real_') or v.name.startswith('integer_'):
v.name = k
s = v
elif v.__class__.__name__== "Apply" or 'rv_frozen' in v.__class__.__name__:
s = skopt_space_from_hp_space(v, k)
elif isinstance(v, tuple):
s = Categorical(v, name=k)
elif isinstance(v, np.ndarray):
s = Categorical(v.tolist(), name=k)
else:
raise NotImplementedError(f"unknown type {v}, {type(v)}")
space_.append(s)
# todo, why converting to Space
_space = Space(space_) if len(space_) > 0 else None
elif 'rv_frozen' in x.__class__.__name__ or x.__class__.__name__== "Apply":
_space = Space([skopt_space_from_hp_space(x)])
else:
raise NotImplementedError(f"unknown type {x}, {type(x)}")
return _space
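# Illustrative sketch: a plain python dict can be converted into a skopt ``Space``, e.g.
# ``to_skopt_space({'lr': [0.001, 0.01, 0.1], 'act': ('relu', 'tanh')})`` yields a Space
# containing a Real dimension named 'lr' (low=0.001, high=0.1) and a Categorical
# dimension named 'act'.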
def scatterplot_matrix_colored(params_names: list,
params_values: list,
best_accs: list,
blur=False,
save=True,
fname='scatter_plot.png'
):
"""Scatterplot colored according to the Z values of the points.
https://github.com/guillaume-chevalier/Hyperopt-Keras-CNN-CIFAR-100/blob/Vooban/AnalyzeTestHyperoptResults.ipynb
"""
nb_params = len(params_values)
best_accs = np.array(best_accs)
norm = mpl.colors.Normalize(vmin=best_accs.min(), vmax=best_accs.max())
fig, ax = plt.subplots(nb_params, nb_params, figsize=(16, 16)) # , facecolor=bg_color, edgecolor=fg_color)
for i in range(nb_params):
p1 = params_values[i]
for j in range(nb_params):
p2 = params_values[j]
axes = ax[i, j]
# Subplot:
if blur:
axes.scatter(p2, p1, s=400, alpha=.1,
c=best_accs, cmap='viridis', norm=norm)
axes.scatter(p2, p1, s=200, alpha=.2,
c=best_accs, cmap='viridis', norm=norm)
axes.scatter(p2, p1, s=100, alpha=.3,
c=best_accs, cmap='viridis', norm=norm)
s = axes.scatter(p2, p1, s=15,
c=best_accs, cmap='viridis', norm=norm)
# Labels only on side subplots, for x and y:
if j == 0:
axes.set_ylabel(params_names[i], rotation=0)
else:
axes.set_yticks([])
if i == nb_params - 1:
axes.set_xlabel(params_names[j], rotation=90)
else:
axes.set_xticks([])
fig.subplots_adjust(right=0.82, top=0.95)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(s, cax=cbar_ax)
plt.suptitle(
'Matrix of tried values in the search space over different params, colored as a function of best test accuracy')
plt.show()
def take(n, iterable):
"""Return first n items of the iterable as a list
https://stackoverflow.com/questions/7971618/return-first-n-keyvalue-pairs-from-dict
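For example (illustrative):
>>> take(2, {'a': 1, 'b': 2, 'c': 3})
['a', 'b']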
"""
return list(islice(iterable, n))
def plot_convergences(opt_dir, what='val_loss', show_whole=True, show_min=False,
**kwargs):
plot_dir = os.path.join(opt_dir, "plots")
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
max_len = kwargs.get('max_len', 500)
show_top = kwargs.get('show_top', 3)
ylim_bottom = kwargs.get('ylim_bottom', None)
ylim_top = kwargs.get('ylim_top', None)
font_size = kwargs.get('font_size', 16)
ylabel = kwargs.get('ylabel', 'Validation MSE')
leg_pos = kwargs.get('leg_pos', 'upper right')
# style = kwargs.get('style', 'ggplot')
models = []
for f in os.listdir(opt_dir):
if os.path.isdir(os.path.join(opt_dir, f)):
models.append(f)
val_loss = pd.DataFrame()
default = pd.Series(np.full(max_len, 0.0))
min_val_loss = {}
for mod in models:
loss_fname = os.path.join(opt_dir, str(mod), "losses.csv")
if os.path.exists(loss_fname):
losses = pd.read_csv(loss_fname)
vl = losses[what]
vl1 = default.add(vl, fill_value=None)
vl1.name = mod
min_val_loss[mod] = vl1.min()
val_loss = pd.concat([val_loss, vl1], axis=1)
# sort min_val_loss by value
min_vl_sorted = {k: v for k, v in sorted(min_val_loss.items(), key=lambda item: item[1])}
top_3 = take(show_top, min_vl_sorted)
colors = {
0: 'b',
1: 'g',
2: 'orange'
}
default = np.full(max_len, np.nan)
plt.close('all')
_, axis = plt.subplots()
for k in min_vl_sorted.keys():
val = val_loss[k]
default[np.argmin(val.values)] = val.min()
if k not in top_3:
if show_whole:
axis.plot(val, color='silver', linewidth=0.5, label='_nolegend_')
if show_min:
axis.plot(default, '.', markersize=2.5, color='silver', label='_nolegend_')
default = np.full(max_len, np.nan)
for idx, k in enumerate(top_3):
val = val_loss[k]
if show_whole:
axis.plot(val, color=colors[idx], linewidth=1, label=f'Rank {idx+1}')
default[np.argmin(val.values)] = val.min()
if show_min:
axis.plot(default, 'x', markersize=5.5, color=colors[idx], label=f'Rank {idx + 1}')
default = np.full(max_len, np.nan)
axis.legend(loc=leg_pos, fontsize=font_size)
axis.set_yscale('log')
axis.set_ylabel(ylabel, fontsize=font_size)
axis.set_xlabel('Epochs', fontsize=font_size)
if ylim_bottom is not None:
axis.set_ylim(bottom=ylim_bottom)
if ylim_top is not None:
axis.set_ylim(top=ylim_top)
_name = what
_name += '_whole_loss_' if show_whole else ''
_name += '_loss_points' if show_min else ''
fname = os.path.join(plot_dir, _name)
plt.savefig(fname, dpi=300, bbox_inches='tight')
return
def to_skopt_as_dict(algorithm:str, backend:str, original_space)->dict:
if backend == 'hyperopt':
if original_space.__class__.__name__ == "Apply":
_space = skopt_space_from_hp_space(original_space)
_space = {_space.name: _space}
elif isinstance(original_space, dict):
_space = OrderedDict()
for k, v in original_space.items():
if v.__class__.__name__ == "Apply" or 'rv_frozen' in v.__class__.__name__:
_space[k] = skopt_space_from_hp_space(v)
elif isinstance(v, Dimension):
_space[v.name] = v
else:
raise NotImplementedError
elif isinstance(original_space, list):
if all([isinstance(s, Dimension) for s in original_space]):
_space = OrderedDict({s.name: s for s in original_space})
elif all([s.__class__.__name__== "Apply" for s in original_space]):
d = [skopt_space_from_hp_space(v) for v in original_space]
_space = OrderedDict({s.name: s for s in d})
else:
raise NotImplementedError
else:
raise NotImplementedError
elif backend == 'optuna':
if isinstance(original_space, list):
if all([isinstance(s, Dimension) for s in original_space]):
_space = OrderedDict({s.name: s for s in original_space})
else:
raise NotImplementedError
else:
raise NotImplementedError
elif backend == 'skopt':
sk_space = to_skopt_space(original_space)
if isinstance(sk_space, Dimension):
_space = {sk_space.name: sk_space}
elif all([isinstance(s, Dimension) for s in sk_space]):
_space = OrderedDict()
for s in sk_space:
_space[s.name] = s
else:
raise NotImplementedError
elif backend == 'sklearn':
if isinstance(original_space, list):
if all([isinstance(s, Dimension) for s in original_space]):
_space = OrderedDict({s.name: s for s in original_space})
else:
raise NotImplementedError
elif isinstance(original_space, dict):
_space = OrderedDict()
for k, v in original_space.items():
if isinstance(v, list):
s = space_from_list(v, k)
elif isinstance(v, Dimension):
s = v
elif isinstance(v, (tuple, list)):
s = Categorical(v, name=k)
elif v.__class__.__name__ in ["Apply",
'rv_continuous_frozen'] or 'rv_frozen' in v.__class__.__name__:
if algorithm == 'random':
s = Real(v.kwds['loc'], v.kwds['loc'] + v.kwds['scale'], name=k, prior=v.dist.name)
else:
s = skopt_space_from_hp_space(v)
else:
raise NotImplementedError(f"unknown type {v}, {type(v)}")
_space[k] = s
else:
raise NotImplementedError
else:
raise NotImplementedError
return _space
def space_from_list(v: list, k: str):
"""Returns space of tyep "Dimension"
"""
if len(v) > 2:
if isinstance(v[0], int):
s = Integer(grid=v, name=k)
elif isinstance(v[0], float):
s = Real(grid=v, name=k)
else:
s = Categorical(v, name=k)
else:
if isinstance(v[0], int):
s = Integer(low=np.min(v), high=np.max(v), name=k)
elif isinstance(v[0], float):
s = Real(low=np.min(v), high=np.max(v), name=k)
elif isinstance(v[0], str):
s = Categorical(v, name=k)
else:
raise NotImplementedError
return s
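# Illustrative sketch:
# ``space_from_list([16, 32, 64, 128], 'units')`` -> Integer dimension with grid [16, 32, 64, 128]
# ``space_from_list(['relu', 'tanh'], 'act')``    -> Categorical(('relu', 'tanh'), name='act')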
def plot_convergence(func_vals, show=False, ax=None, **kwargs):
func_vals = np.array(func_vals)
n_calls = len(func_vals)
mins = [np.min(func_vals[:i])
for i in range(1, n_calls + 1)]
if ax is None:
ax = plt.gca()
_kwargs = {
"marker":".",
"markersize": 12,
"lw": 2,
"show":show,
"ax_kws": {"title": 'Convergence plot',
"xlabel": 'Number of calls $n$',
"ylabel": '$\min f(x)$ after $n$ calls',
'grid': True},
'ax': ax,
}
_kwargs.update(kwargs)
ax = plot(range(1, n_calls + 1), mins, **_kwargs)
return ax
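# Illustrative usage (hypothetical objective values):
#
# >>> plot_convergence([0.9, 0.7, 0.8, 0.5, 0.55], show=False)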
def make_space(x):
if len(x) == 2:
low, high = x
if 'int' in low.__class__.__name__:
space = Integer(low=low, high=high)
elif 'float' in low.__class__.__name__:
space = Real(low=low, high=high)
else:
raise NotImplementedError
else:
raise NotImplementedError
return space | AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/hyperopt/utils.py | utils.py |
__all__ = ["fANOVA"]
import itertools
import warnings
from typing import Tuple, List, Set, Union
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree._tree import Tree
from sklearn.ensemble import RandomForestRegressor
from ai4water.backend import np, pd
class fANOVA(object):
"""
Calculation of parameter importance using FANOVA (Hutter et al., 2014).
Parameters
----------
X :
input data of shape (n_iterations, n_parameters). For hyperparameter optimization,
iterations represent number of optimization iterations and parameter represent
number of hyperparameters
Y :
objective value corresponding to X. Its length should be same as that of ``X``
dtypes : list
list of strings determining the type of hyperparameter. Allowed values are only
``categorical`` and ``numerical``.
bounds : list
list of tuples, where each tuple defines the upper and lower limit of corresponding
parameter
parameter_names : list
names of features/parameters/hyperparameters
cutoffs : tuple
n_estimators : int
number of trees
max_depth : int (default=64)
maximum depth of trees
**rf_kws :
keyword arguments to sklearn.ensemble.RandomForestRegressor
Examples
--------
>>> import numpy as np
>>> import pandas as pd
>>> from ai4water.hyperopt import fANOVA
>>> x = np.arange(20).reshape(10, 2).astype(float)
>>> y = np.linspace(1, 30, 10).astype(float)
... # X are hyperparameters and Y are objective function values at corresponding iterations
>>> f = fANOVA(X=x, Y=y,
... bounds=[(-2, 20), (-5, 50)],
... dtypes=["numerical", "numerical"],
... random_state=313, max_depth=3)
... # calculate importance
>>> imp = f.feature_importance()
for categorical parameters
>>> x = pd.DataFrame(['2', '2', '3', '1', '1', '2', '2', '1', '3', '3', '3'], columns=['a'])
>>> x['b'] = ['3', '3', '1', '3', '1', '2', '4', '4', '3', '3', '4']
>>> y = np.linspace(-1., 1.0, len(x))
>>> f = fANOVA(X=x, Y=y, bounds=[None, None], dtypes=['categorical', 'categorical'],
... random_state=313, max_depth=3, n_estimators=1)
... # calculate importance
>>> imp = f.feature_importance()
for mix types
>>> x = pd.DataFrame(['2', '2', '3', '1', '1', '2', '2', '1', '3', '3', '3'], columns=['a'])
>>> x['b'] = np.arange(100, 100+len(x))
>>> y = np.linspace(-1., 2.0, len(x))
>>> f = fANOVA(X=x, Y=y, bounds=[None, (10, 150)], dtypes=['categorical', 'numerical'],
... random_state=313, max_depth=5, n_estimators=5)
... # calculate importance
>>> imp = f.feature_importance()
"""
def __init__(
self,
X:Union[np.ndarray, pd.DataFrame],
Y:np.ndarray,
dtypes:List[str],
bounds:List[Union[tuple, None]],
parameter_names=None,
cutoffs=(-np.inf, np.inf),
n_estimators=64,
max_depth=64,
random_state=313,
**rf_kws
):
X_ = []
encoders = {}
cols = {}
_bounds = []
if isinstance(X, pd.DataFrame):
if parameter_names is None:
parameter_names = X.columns.tolist()
X = X.values
if parameter_names is None:
parameter_names = [f"F{i}" for i in range(X.shape[1])]
assert len(parameter_names) == len(bounds) == len(dtypes) == X.shape[1]
assert len(X) == len(Y), f"X and Y should have same length"
self.para_names = parameter_names
if np.isnan(Y).sum()>0:
warnings.warn("Removing nans encountered in Y.")
df = pd.DataFrame(np.column_stack((X, Y))).dropna()
X, Y = df.values[:, 0:-1], df.values[:, -1]
for idx, dtype in enumerate(dtypes):
if dtype.lower() == "categorical":
x = X[:, idx]
ohe = OneHotEncoder(sparse=False)
x_ = ohe.fit_transform(x.reshape(-1,1))
X_.append(x_)
encoders[idx] = ohe
cols[idx] = x_.shape[1]
__bounds = [(0., 1.) for _ in range(x_.shape[1])]
assert bounds[idx] is None, f"Cannot set bounds for categorical column"
_bounds += __bounds
else:
X_.append(X[:, idx])
cols[idx] = 1
assert isinstance(bounds[idx], tuple), f"for non-categorical parameters bounds must be given as a tuple (min, max)"
assert len(bounds[idx]) == 2
assert bounds[idx][0] < bounds[idx][1]
_bounds.append(bounds[idx])
self.encoded_columns = encoded_columns(cols)
X_ = np.column_stack(X_)
self._n_features = X_.shape[1]
self._N_features = X.shape[1]
self.bounds = _bounds
self.rf = RandomForestRegressor(
n_estimators=n_estimators,
max_depth=max_depth,
random_state=random_state,
**rf_kws
)
self.rf.fit(X_, Y)
# initialize a dictionary with parameter dims
self.variance_dict = dict()
# all midpoints and interval sizes treewise for the whole forest
self.all_midpoints = []
self.all_sizes = []
# compute midpoints and interval sizes for variables in each tree
for dt in self.rf.estimators_:
sizes = []
midpoints = []
tree_split_values = self._tree_split_values(dt.tree_)
for i, split_vals in enumerate(tree_split_values):
#if np.isnan(bounds[i][1]): # categorical parameter
# pass
#else:
# add bounds to split values
sv = np.array([_bounds[i][0]] + list(split_vals) + [_bounds[i][1]])
# compute midpoints and sizes
midpoints.append((1 / 2) * (sv[1:] + sv[:-1]))
sizes.append(sv[1:] - sv[:-1])
self.all_midpoints.append(midpoints)
self.all_sizes.append(sizes)
# capital V in the paper
self.trees_total_variance = []
# dict of lists where the keys are tuples of the dimensions
# and the value list contains \hat{f}_U for the individual trees
# reset all the variance fractions computed
self.trees_variance_fractions = {}
self.V_U_total = {}
self.V_U_individual = {}
self.cutoffs = cutoffs
self.set_cutoffs(cutoffs)
def _tree_split_values(self, tree:Tree):
"""calculates split values for a decision tree"""
split_values = [set() for _ in range(self._n_features)]
for node_index in range(tree.node_count):
feature = tree.feature[node_index]
if feature >= 0: # Not leaf.
threshold = tree.threshold[node_index]
split_values[feature].add(threshold)
sorted_split_values = []
for split_val in split_values:
split_values_array = np.array(list(split_val), dtype=np.float64)
split_values_array.sort()
sorted_split_values.append(split_values_array)
return sorted_split_values
def set_cutoffs(self, cutoffs=(-np.inf, np.inf), quantile=None):
"""
Setting the cutoffs to constrain the input space
To properly do things like 'improvement over default' the
fANOVA now supports cutoffs on the y values. These will exclude
parts of the parameters space where the prediction is not within
the provided cutoffs. This is a specialization of
"Generalized Functional ANOVA Diagnostics for High Dimensional
Functions of Dependent Variables" by Hooker.
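Example (illustrative sketch, not part of the original source; assumes a fitted
instance ``f`` as in the class docstring):
>>> f.set_cutoffs((0.0, np.inf))  # only consider regions where the prediction is >= 0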
"""
# reset all the variance fractions computed
self.trees_variance_fractions = {}
self.V_U_total = {}
self.V_U_individual = {}
# recompute the trees' total variance
self.trees_total_variance = self.get_trees_total_variances()
return
def get_trees_total_variances(self)->tuple:
"""get variance of all trees"""
variance = []
for dt in self.rf.estimators_:
variance.append(self._tree_variance(dt.tree_))
return tuple(variance)
def _tree_variance(self, tree:Tree):
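# computes the cardinality-weighted variance of the leaf-node values of a single tree;
# this is the total variance ("capital V in the paper") used later to normalise the
# per-dimension variance fractions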
leaf_node_indices = np.argwhere(tree.feature<0).reshape(-1,)
statistics = self._tree_statistics(tree)
values, weights = [], []
for node_index in leaf_node_indices:
val, weight = statistics[node_index]
values.append(val)
weights.append(weight)
avg_values = np.average(values, weights=weights)
variance = np.average((np.array(values) - avg_values) ** 2, weights=weights)
return variance
def _tree_statistics(self, tree:Tree) -> np.ndarray:
n_nodes = tree.node_count
# Holds for each node, its weighted average value and the sum of weights.
statistics = np.empty((n_nodes, 2), dtype=np.float64)
subspaces = [None for _ in range(n_nodes)]
subspaces[0] = np.array(self.bounds)
# Compute marginals for leaf nodes.
for node_index in range(n_nodes):
subspace = subspaces[node_index]
if tree.feature[node_index] < 0:
value = tree.value[node_index]
weight = _get_cardinality(subspace)
statistics[node_index] = [value, weight]
else:
for child_node_index, child_subspace in zip(
_get_node_children(node_index, tree),
_get_node_children_subspaces(node_index, subspace, tree),
):
assert subspaces[child_node_index] is None
subspaces[child_node_index] = child_subspace
# Compute marginals for internal nodes.
for node_index in reversed(range(n_nodes)):
if not tree.feature[node_index] < 0: # if not node leaf
child_values = []
child_weights = []
for child_node_index in _get_node_children(node_index, tree):
child_values.append(statistics[child_node_index, 0])
child_weights.append(statistics[child_node_index, 1])
value = np.average(child_values, weights=child_weights)
weight = np.sum(child_weights)
statistics[node_index] = [value, weight]
return statistics
def _compute_marginals(self, dimensions):
"""
Returns the marginal of selected parameters
Parameters
----------
dimensions: tuple
Contains the indices of ConfigSpace for the selected parameters (starts with 0)
"""
dimensions = tuple(dimensions)
# check if values has been previously computed
if dimensions in self.V_U_individual:
return
# otherwise make sure all lower order marginals have been
# computed, if not compute them
for k in range(1, len(dimensions)):
for sub_dims in itertools.combinations(dimensions, k):
if sub_dims not in self.V_U_total:
self._compute_marginals(sub_dims)
assert len(dimensions)==1
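# NOTE (editorial comment, not in the original source): only single-parameter (main effect)
# marginals are supported here, because ``encoded_columns`` maps one original parameter
# at a time to the indices of its (possibly one-hot encoded) columns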
raw_dimensions = self.encoded_columns[dimensions[0]]
# now all lower order terms have been computed
self.V_U_individual[dimensions] = []
self.V_U_total[dimensions] = []
for tree_idx in range(len(self.all_midpoints)):
# collect all the midpoints and corresponding sizes for that tree
midpoints = [self.all_midpoints[tree_idx][dim] for dim in raw_dimensions]
sizes = [self.all_sizes[tree_idx][dim] for dim in raw_dimensions]
prod_midpoints = itertools.product(*midpoints)
prod_sizes = itertools.product(*sizes)
sample = np.full(self._n_features, np.nan, dtype=float)  # np.float is deprecated in recent numpy
values: Union[List[float], np.ndarray] = []
weights: Union[List[float], np.ndarray] = []
# make prediction for all midpoints and weigh them by the corresponding size
for i, (m, s) in enumerate(zip(prod_midpoints, prod_sizes)):
sample[list(raw_dimensions)] = list(m)
value, weight = self._tree_marginalized_statistics(sample, self.rf.estimators_[tree_idx].tree_)
weight *= np.prod(s)
values.append(value)
weights.append(weight)
weights = np.array(weights)
values = np.array(values)
average_values = np.average(values, weights=weights)
variance = np.average((values - average_values) ** 2, weights=weights)
assert variance >= 0.0, f"Non convergence of variance {variance}"
# line 10 in algorithm 2
# note that V_U^2 can be computed by var(\hat a)^2 - \sum_{subU} var(f_subU)^2
# which is why, \hat{f} is never computed in the code, but
# appears in the pseudocode
V_U_total = np.nan
V_U_individual = np.nan
if weights.sum()>0:
V_U_total = variance
V_U_individual = variance
for k in range(1, len(dimensions)):
for sub_dims in itertools.combinations(dimensions, k):
V_U_individual -= self.V_U_individual[sub_dims][tree_idx]
V_U_individual = np.clip(V_U_individual, 0, np.inf)
self.V_U_individual[dimensions].append(V_U_individual)
self.V_U_total[dimensions].append(V_U_total)
return
def _tree_marginalized_statistics(self,
feature_vector: np.ndarray,
tree:Tree
) -> Tuple[float, float]:
assert feature_vector.size == self._n_features
statistics = self._tree_statistics(tree)
marginalized_features = np.isnan(feature_vector)
active_features = ~marginalized_features
# Reduce search space cardinalities to 1 for non-active features.
search_spaces = np.array(self.bounds.copy())
search_spaces[marginalized_features] = [0.0, 1.0]
# Start from the root and traverse towards the leafs.
active_nodes = [0]
active_search_spaces = [search_spaces]
node_indices = []
active_features_cardinalities = []
tree_active_features = self._tree_active_features(tree)
while len(active_nodes) > 0:
node_index = active_nodes.pop()
search_spaces = active_search_spaces.pop()
feature = tree.feature[node_index]
if feature >= 0: # Not leaf. Avoid unnecessary call to `_is_node_leaf`.
# If node splits on an active feature, push the child node that we end up in.
response = feature_vector[feature]
if not np.isnan(response):
if response <= tree.threshold[node_index]:
next_node_index = tree.children_left[node_index]
next_subspace = _get_node_left_child_subspaces(
node_index, search_spaces, tree
)
else:
next_node_index = tree.children_right[node_index]
next_subspace = _get_node_right_child_subspaces(
node_index, search_spaces, tree
)
active_nodes.append(next_node_index)
active_search_spaces.append(next_subspace)
continue
# If subtree starting from node splits on an active feature, push both child nodes.
if (active_features & tree_active_features[node_index]).any():
for child_node_index in _get_node_children(node_index, tree):
active_nodes.append(child_node_index)
active_search_spaces.append(search_spaces)
continue
# If node is a leaf or the subtree does not split on any of the active features.
node_indices.append(node_index)
active_features_cardinalities.append(_get_cardinality(search_spaces))
node_indices = np.array(node_indices, dtype=np.int32)
active_features_cardinalities = np.array(active_features_cardinalities)
statistics = statistics[node_indices]
values = statistics[:, 0]
weights = statistics[:, 1]
weights = weights / active_features_cardinalities
value = np.average(values, weights=weights)
weight = weights.sum()
return value, weight
def _tree_active_features(self, tree:Tree) -> List[Set[int]]:
subtree_active_features = np.full((tree.node_count, self._n_features), fill_value=False)
for node_index in reversed(range(tree.node_count)):
feature = tree.feature[node_index]
if feature >= 0: # Not leaf. Avoid unnecessary call to `_is_node_leaf`.
subtree_active_features[node_index, feature] = True
for child_node_index in _get_node_children(node_index, tree):
subtree_active_features[node_index] |= subtree_active_features[
child_node_index
]
return subtree_active_features
def quantify_importance(self, dims):
if type(dims[0]) == str:
idx = []
for i, param in enumerate(dims):
idx.append(self.get_idx_by_hyperparameter_name(param))
dimensions = tuple(idx)
# make sure that all the V_U values are computed for each tree
else:
dimensions = dims
self._compute_marginals(dimensions)
importance_dict = {}
for k in range(1, len(dimensions) + 1):
for sub_dims in itertools.combinations(dimensions, k):
if type(dims[0]) == str:
dim_names = []
for j, dim in enumerate(sub_dims):
dim_names.append(self.get_hyperparameter_by_idx(dim))
dim_names = tuple(dim_names)
importance_dict[dim_names] = {}
else:
importance_dict[sub_dims] = {}
# clean here to catch zero variance in any of the trees
non_zero_idx = np.nonzero([self.trees_total_variance[t] for t in range(self.rf.n_estimators)])
if len(non_zero_idx[0]) == 0:
raise RuntimeError('Encountered zero total variance in all trees.')
fractions_total = np.array([self.V_U_total[sub_dims][t] / self.trees_total_variance[t]
for t in non_zero_idx[0]])
fractions_individual = np.array([self.V_U_individual[sub_dims][t] / self.trees_total_variance[t]
for t in non_zero_idx[0]])
if type(dims[0]) == str:
importance_dict[dim_names]['individual importance'] = np.mean(fractions_individual)
importance_dict[dim_names]['total importance'] = np.mean(fractions_total)
importance_dict[dim_names]['individual std'] = np.std(fractions_individual)
importance_dict[dim_names]['total std'] = np.std(fractions_total)
else:
importance_dict[sub_dims]['individual importance'] = np.mean(fractions_individual)
importance_dict[sub_dims]['total importance'] = np.mean(fractions_total)
importance_dict[sub_dims]['individual std'] = np.std(fractions_individual)
importance_dict[sub_dims]['total std'] = np.std(fractions_total)
return importance_dict
def feature_importance(self, dimensions=None, interaction_level=1, return_raw:bool=False):
if dimensions is None:
dimensions = self.para_names
if isinstance(dimensions, (int, str)):
dimensions = (dimensions,)
importances_mean = {}
importances_std = {}
for dim in dimensions:
imp = self.quantify_importance((dim, ))
importances_mean[dim] = imp[(dim,)]['individual importance']
importances_std[dim] = imp[(dim,)]['individual std']
if len(dimensions) == self._N_features:
importances_sum = sum(importances_mean.values())
for name in importances_mean:
importances_mean[name] /= importances_sum
importances = {k: v for k, v in reversed(sorted(importances_mean.items(), key=lambda item: item[1]))}
if return_raw:
return importances_mean, importances_std
return importances
def get_hyperparameter_by_idx(self, idx:int)->str:
return self.para_names[idx]
def get_idx_by_hyperparameter_name(self, name:str)->int:
return self.para_names.index(name)
def _get_node_right_child_subspaces(
node_index: int,
search_spaces: np.ndarray,
tree:Tree
) -> np.ndarray:
return _get_subspaces(
search_spaces,
search_spaces_column=0,
feature=tree.feature[node_index],
threshold=tree.threshold[node_index],
)
def _get_node_children(node_index: int, tree:Tree) -> Tuple[int, int]:
return tree.children_left[node_index], tree.children_right[node_index]
def _get_cardinality(search_spaces: np.ndarray) -> float:
return np.prod(search_spaces[:, 1] - search_spaces[:, 0]).item()
def _get_subspaces(
search_spaces: np.ndarray, *,
search_spaces_column: int, feature: int, threshold: float
) -> np.ndarray:
search_spaces_subspace = np.copy(search_spaces)
search_spaces_subspace[feature, search_spaces_column] = threshold
return search_spaces_subspace
def _get_node_children_subspaces(
node_index: int, search_spaces: np.ndarray, tree
) -> Tuple[np.ndarray, np.ndarray]:
return (
_get_node_left_child_subspaces(node_index, search_spaces, tree),
_get_node_right_child_subspaces(node_index, search_spaces, tree),
)
def _get_node_left_child_subspaces(
node_index: int, search_spaces: np.ndarray, tree
) -> np.ndarray:
return _get_subspaces(
search_spaces,
search_spaces_column=1,
feature=tree.feature[node_index],
threshold=tree.threshold[node_index],
)
def encoded_columns(cols:dict):
# c = {'a': 2, 'b': 1, 'c': 3}
# -> {'a': [0,1], 'b': [2], 'c': [3,4,5]}
en = 0
st = 0
columns = {}
for k, v in cols.items():
en += v
columns[k] = [i for i in range(st, en)]
st += v
return columns
if __name__ == "__main__":
pass
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/hyperopt/_fanova.py | _fanova.py |
import warnings
from collections import OrderedDict
from ai4water.backend import np, plt, imageio, shapefile, os, random
MSG = f"""
If you installed shapely using pip, try to reinstall it
(after uninstalling the previous installation first)
by manually downloading the wheel file from
https://www.lfd.uci.edu/~gohlke/pythonlibs/#shapely
and then installing it from the wheel file using the following command
pip install path/to/wheel.whl"""
M2ToAcre = 0.0002471 # meter square to Acre
COLORS = ['#CDC0B0', '#00FFFF', '#76EEC6', '#C1CDCD', '#E3CF57', '#EED5B7', '#8B7D6B', '#0000FF', '#8A2BE2', '#9C661F',
'#FF4040', '#8A360F', '#98F5FF', '#FF9912', '#B23AEE', '#9BCD9B', '#8B8B00']
def get_sorted_dict(dictionary):
"""sorts the dictionary based on its keys and returns the new dictionary named sorted_dict"""
sorted_dict = OrderedDict()
for k in sorted(dictionary):
sorted_dict[k] = dictionary[k]
return sorted_dict
def get_areas_geoms(shp_reader):
"""returns lists containing areas of all records in shapefile and geometries of all records
in shape file and number of records in shapefile"""
try:
from shapely.geometry import shape
except FileNotFoundError:
raise FileNotFoundError(MSG)
except OSError:
warnings.warn(MSG, UserWarning)
shape = None # so that docs can be built
except ModuleNotFoundError:
warnings.warn(MSG, UserWarning)
shape = None # so that docs can be built
shapes = shp_reader.shapes()
geometries = [None] * len(shapes) # a container for geometries of shapefile
areas = [None] * len(shapes) # a container for areas of shapefile
if shape is None:
raise ModuleNotFoundError('shapely package should be installed.')
for shp in range(len(shapes)):
feature = shp_reader.shapeRecords()[shp] # pyshp
first = feature.shape.__geo_interface__ # pyshp
geometries[shp] = shape(first) # pyshp to shapely geometry
areas[shp] = shape(first).area * M2ToAcre
return areas, geometries
def check_shp_validity(geom_list, no_of_lus, name='landuse', verbosity=1):
new_geom_list = [None] * no_of_lus # a container for geometries of landuse shapefile with corrected topology
for lu in range(no_of_lus): # iterating over each landuse
luf = geom_list[lu]
if not luf.is_valid:
n = 0
for j in range(len(luf)): # iterating over each ring of feature which is invalid
if not luf[j].is_valid: # checking which ring in feature is invalid
n = n + 1 # counting number of invalid rings in a feature
new_geom_list[lu] = luf.buffer(0) # correcting the 'self-intersection' for the feature which is invalid
else:
new_geom_list[lu] = luf
# checking the validity of each landuse once again to make sure that each landuse's is valid
for lu in range(no_of_lus):
sub_lu = new_geom_list[lu]
if sub_lu.is_valid and verbosity:
print('{} {} is valid now'.format(name, lu))
elif verbosity:
print('{} {} is still invalid'.format(name, lu))
return new_geom_list
def get_total_area(file_to_read):
shape_area = 0.0
for sub_shp in file_to_read:
shape_area += sub_shp.area*M2ToAcre
return shape_area
class GifUtil(object):
def __init__(self, folder, initials=None, contains=None):
self.init = initials # starting name of files
self.contains = contains
self.input_files = [] # container for input files
self.get_all_files(initials) # get files to make gif
self.dir = folder # folder containing input files and output gif
def get_all_files(self, init):
for file in os.listdir("./plots"):
if file.endswith(".png"):
if self.init:
if file.startswith(init):
self.input_files.append(file)
if self.contains:
if self.contains in file:
self.input_files.append(file)
def make_gif(self, duration=0.5, name=None):
if name is None:
if self.init:
name = self.init
else:
name = self.contains
images = []
for file in np.sort(self.input_files):
filename = os.path.join(self.dir, file)
images.append(imageio.imread(filename))
imageio.mimsave(os.path.join(self.dir, name + '.gif'), images, duration=duration)
def remove_images(self):
for img in self.input_files:
path = os.path.join(self.dir, img)
if os.path.exists(path):
os.remove(path)
def find_records(shp_file, record_name, feature_number):
"""find the metadata about feature given its feature number and column_name which contains the data"""
assert os.path.exists(shp_file), f'{shp_file} does not exist'
shp_reader = shapefile.Reader(shp_file)
col_no = find_col_name(shp_reader, record_name)
if col_no == -99:
raise ValueError(f'no column named {record_name} found in {shp_reader.shapeName}')
else:
# print(col_no, 'is the col no')
name = get_record_in_col(shp_reader, feature_number, col_no)
return name
def find_col_name(shp_reader, field_name):
_col_no = 0
col_no = -99
for fields in shp_reader.fields:
_col_no += 1
for field in fields:
if field == field_name:
col_no = _col_no
break
return col_no
def get_record_in_col(shp_reader, i, col_no):
recs = shp_reader.records()
col_no = col_no - 2  # -2: 1 for index reduction, 1 for a junk column that shows up in records
return recs[i][col_no]
def plot_shapefile(shp_files,
labels=None,
show_all_together=True,
bbox_shp=None,
recs=None, rec_idx=None,
leg_kws=None,
save=False,
colors=None,
markersize=12,
save_kws=None):
"""
leg_kws:{'bbox_to_anchor': (1.02, -0.15),
'numpoints': 1,
'fontsize': 16,
'markerscale':2}
save_kws:{'fname': 'point_plot', 'bbox_inches': 'tight'}
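example (illustrative sketch, not part of the original source; the file name is an assumption):
>>> plot_shapefile('sub_basins.shp', markersize=8, save=False)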
"""
if not isinstance(shp_files, list):
shp_files = [shp_files]
if leg_kws is None:
leg_kws = {'bbox_to_anchor': (0.93, -0.15),
'numpoints': 1,
'fontsize': 16,
'markerscale': 2}
if labels is None:
labels = {}
if colors is None:
colors = {}
if save_kws is None:
save_kws = {'fname': 'point_plot', 'dpi': 300, 'bbox_inches': 'tight'}
records = shapefile.Reader(shp_files[0]).shapeRecords()
Colors = random.choices(COLORS, k=len(records))
if len(shp_files) > 1:
for i in range(1, len(shp_files)):
shp_reader = shapefile.Reader(shp_files[i])
records += shp_reader.shapeRecords()
Colors += random.choices(COLORS, k=len(shp_reader.shapeRecords()))
plt.close('all')
for feature, n in zip(records, Colors):
if recs is not None:
assert isinstance(rec_idx, int)
rec = feature.record[rec_idx]
else:
rec, recs = '', ''
if rec in recs:
f_if = feature.shape.__geo_interface__
if f_if is None:
pass
else:
if f_if['type'].lower() in ['point']: # it is point
c = colors.get(rec, random.choice(COLORS))
plt.plot(*f_if['coordinates'], '*', label=labels.get(rec, rec), color=c, markersize=markersize)
else:
plot_polygon_feature(feature, n, shapefile.Reader(shp_files[0]).bbox)
if bbox_shp is not None:
shp_reader = shapefile.Reader(bbox_shp)
records = shp_reader.shapeRecords()
for feature, n in zip(records, Colors):
plot_polygon_feature(feature, n, shapefile.Reader(shp_files[0]).bbox)
plt.legend(**leg_kws)
if not show_all_together:
plt.show()
if save:
plt.savefig(**save_kws)
# if show_all_together:
plt.show()
# shp_reader.close()
return
def plot_polygon_feature(feature, n, bbox):
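# plots a single (multi)polygon feature from a shapefile record via its __geo_interface__,
# clipping the axes to the provided bounding box ``bbox``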
f_if = feature.shape.__geo_interface__
polys = len(f_if['coordinates'])
def_col = n
for i in range(polys):
a = np.array(f_if['coordinates'][i])
if a.ndim < 2 and len(a.shape) > 0:
c = a
m = max([len(ci) for ci in c])
for ci in c:
col = 'k' if len(ci) != m else def_col
x = np.array([k[0] for k in ci])
y = np.array([k[1] for k in ci])
plt.plot(x, y, col, label="__none__", linewidth=0.5)
elif len(a.shape) > 0:
b = a.reshape(-1, 2)
plt.plot(b[:, 0], b[:, 1], def_col)
plt.ylim([bbox[1], bbox[3]])
plt.xlim([bbox[0], bbox[2]])
return
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/spatial_utils.py | spatial_utils.py |
from typing import Union
from collections import OrderedDict
from .spatial_utils import find_records
from .spatial_utils import plot_shapefile
from .spatial_utils import get_total_area, GifUtil
from .spatial_utils import get_sorted_dict, get_areas_geoms, check_shp_validity
from ai4water.backend import os, np, pd, plt, mpl, shapefile, easy_mpl
mdates = mpl.dates
M2ToAcre = 0.0002471 # meter square to Acre
COLORS = ['#CDC0B0', '#00FFFF', '#76EEC6', '#C1CDCD', '#E3CF57', '#EED5B7',
'#8B7D6B', '#0000FF', '#8A2BE2', '#9C661F', '#FF4040', '#8A360F',
'#98F5FF', '#FF9912', '#B23AEE', '#9BCD9B', '#8B8B00']
class MakeHRUs(object):
"""
Distributes a given time series data for HRUs in a catchment according to
the `hru_definition`. Currently it is supposed that only land use changes
with time.
Example:
>>> import os
>>> from ai4water.preprocessing.spatial_processing import MakeHRUs
>>> # shapefile_paths is the directory where the shapefiles are located
>>> shapefile_paths = os.path.join(os.getcwd(), 'shapefiles')
>>> SubBasin_shp = os.path.join(shapefile_paths, 'sub_basins.shp')
>>> hru_object = MakeHRUs('unique_lu_sub',
... index={2011: {'shapefile': os.path.join(shapefile_paths, 'lu2011.shp'), 'feature': 'NAME'},
... 2012: {'shapefile': os.path.join(shapefile_paths, 'lu2012.shp'), 'feature': 'NAME'}},
... subbasins_shape={'shapefile': SubBasin_shp, 'feature': 'id'}
... )
>>> hru_object.call()
"""
HRU_DEFINITIONS = [
'unique_sub',
'unique_lu',
'unique_soil',
'unique_slope',
'unique_lu_sub',
'unique_lu_soil',
'unique_lu_slope',
'unique_soil_sub',
'unique_soil_slope',
'unique_slope_sub',
'unique_lu_soil_sub',
'unique_lu_soil_slope',
]
def __init__(
self,
hru_definition:str,
index:dict,
soil_shape: Union[dict, None] = None,
slope_shape: Union[dict, None]=None,
subbasins_shape: Union[None, dict] = None,
save:bool = False,
show:bool = True,
verbosity: int = 1
):
"""
Parameters
----------
hru_definition :
hru definition. For valid hru_definitions check `MakeHRUs.HRU_DEFINITIONS`
index :
dictionary defining shapefiles of landuse which change with time.
For example in following a land use shapefile for two years is defined.
All attributes in land use shapefiles must have the feature `NAME`.
>>> {2011: {'shapefile': os.path.join(shapefile_paths, 'lu2011.shp'), 'feature': 'NAME'},
... 2012: {'shapefile': os.path.join(shapefile_paths, 'lu2012.shp'), 'feature': 'NAME'}}
soil_shape :
only applicable if `soil` exists in hru_definition.
All attributes in land use soil.shp must have the feature `NAME`.
>>> {'shapefile': os.path.join(shapefile_paths, 'soil.shp'), 'feature': 'NAME'}
slope_shape :
only applicable if `slope` exists in hru_definition.
All attributes in slope.shp shapefiles must have the feature `percent`.
>>> {'shapefile': os.path.join(shapefile_paths, 'slope.shp'), 'feature': 'percent'}
subbasins_shape :
only applicable if `sub` exists in hru_definition.
All attributes in land use shapefiles must have the feature `id`.
>>> {'shapefile': os.path.join(shapefile_paths, 'subbasins.shp'), 'feature': 'id'}
save : bool, default=False
whether to save the plots or not.
show : bool, default=True
whether to show the plots or not.
verbosity :
Determines verbosity.
"""
if shapefile is None:
raise ModuleNotFoundError(f"You must install pyshp package e.g. pip install pyshp")
self.hru_definition = hru_definition
assert hru_definition in self.HRU_DEFINITIONS, f"""
invalid value for hru_definition '{hru_definition}' provided.
Allowed values are
{self.HRU_DEFINITIONS}"""
self.combinations = hru_definition.split('_')[1:]
if len(self.combinations)<2:
if isinstance(index, dict):
if not all([i.__class__.__name__=='NoneType' for i in index.values()]):
assert all([i.__class__.__name__=='NoneType' for i in [soil_shape, slope_shape, subbasins_shape]]), f"""
hru consists of only one feature i.e. {self.combinations[0]}. Thus, if index is provided, no
other shapefile must be given.
"""
self.index = index
self.soil_shape = soil_shape
self.slope_shape = slope_shape
self.sub_shape = subbasins_shape
self.hru_paras = OrderedDict()
self.hru_geoms = OrderedDict()
self.all_hrus = []
self.hru_names = []
self.save = save
self.show = show
self.verbosity = verbosity
st, en = list(index.keys())[0], list(index.keys())[-1]
# initiating yearly dataframes
self.area = pd.DataFrame(index=pd.date_range(str(st) + '0101', str(en) + '1231', freq='12m'))
self.curve_no = pd.DataFrame(index=pd.date_range(str(st) + '0101', str(en) + '1231', freq='12m'))
# distance_to_outlet
self.dist_to_out = pd.DataFrame(index=pd.date_range(str(st) + '0101', str(en) + '1231',
freq='12m'))
# area of HRU as fraction of total catchment
self.area_frac_cat = pd.DataFrame(index=pd.date_range(str(st) + '0101', str(en) + '1231',
freq='12m'))
def call(self, plot_hrus=True):
"""
Makes the HRUs.
Parameters
----------
plot_hrus :
If true, the exact area hrus will be plotted as well.
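Examples
--------
(illustrative sketch, not part of the original source; uses the object constructed
in the class docstring)
>>> hru_object.call(plot_hrus=False)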
"""
for _yr, shp_file in self.index.items():
_hru_paras, _hru_geoms = self.get_hrus(shp_file, _yr)
self.hru_paras.update(_hru_paras)
if plot_hrus:
self.plot_hrus(year=_yr, _polygon_dict=self.hru_geoms, nrows=3, ncols=4,
bbox=self.slope_shape, annotate=False,
name=self.hru_definition)
return
def get_hrus(self,
idx_shp,
year
):
"""
Parameters
----------
idx_shp :
shapefile whose area distribution changes with time e.g. land use
year :
the year for which the HRUs are being created; used as a prefix in HRU codes
"""
if self.verbosity > 0:
print('Checking validity of landuse shapefile')
hru_paras = OrderedDict()
a = 0
if len(self.combinations) ==1:
shp_name = self.combinations[0]
if idx_shp is None:
shp_file = getattr(self, f'{shp_name}_shape')['shapefile']
feature = getattr(self, f'{shp_name}_shape')['feature']
else:
shp_file = idx_shp['shapefile']
feature = idx_shp['feature']
shp_reader = shapefile.Reader(shp_file)
_, shp_geom_list = get_areas_geoms(shp_reader)
if 'sub' not in self.hru_definition:
shp_geom_list = check_shp_validity(shp_geom_list, len(shp_reader.shapes()), name=shp_name,
verbosity=self.verbosity)
self.tot_cat_area = get_total_area(shp_geom_list)
for shp in range(len(shp_reader.shapes())):
code = f'{str(year)}_{shp_name}_{find_records(shp_file, feature, shp)}'
hru_paras[code] = {'yearless_key': code[4:]}
intersection = shp_geom_list[shp]
self.hru_geoms[code] = [intersection, shp_geom_list[shp]]
self._foo(code, intersection)
if len(self.combinations) == 2:
first_shp_name = self.combinations[0]
second_shp_name = self.combinations[1]
if idx_shp is None:
first_shp_file = getattr(self, f'{first_shp_name}_shape')['shapefile']
first_feature = getattr(self, f'{first_shp_name}_shape')['feature']
else:
first_shp_file = idx_shp['shapefile']
first_feature = idx_shp['feature']
second_shp_file = getattr(self, f'{second_shp_name}_shape')['shapefile']
second_feature = getattr(self, f'{second_shp_name}_shape')['feature']
first_shp_reader = shapefile.Reader(first_shp_file)
second_shp_reader = shapefile.Reader(second_shp_file)
_, first_shp_geom_list = get_areas_geoms(first_shp_reader)
_, second_shp_geom_list = get_areas_geoms(second_shp_reader)
if 'sub' not in self.hru_definition:
second_shp_geom_list = check_shp_validity(second_shp_geom_list, len(second_shp_reader.shapes()),
name=second_shp_name, verbosity=self.verbosity)
self.tot_cat_area = get_total_area(first_shp_geom_list)
else:
self.tot_cat_area = get_total_area(second_shp_geom_list)
for j in range(len(second_shp_reader.shapes())):
for lu in range(len(first_shp_reader.shapes())):
a += 1
intersection = second_shp_geom_list[j].intersection(first_shp_geom_list[lu])
lu_code = find_records(first_shp_file, first_feature, lu)
sub_code = find_records(second_shp_file, second_feature, j)
sub_code = f'_{second_shp_name}_' + str(sub_code)
code = str(year) + sub_code + f'_{first_shp_name}_' + lu_code #, lu_code
self.hru_geoms[code] = [intersection, second_shp_geom_list[j], first_shp_geom_list[lu]]
hru_paras[code] = {'yearless_key': code[4:]}
self._foo(code, intersection)
if len(self.combinations) == 3:
first_shp_name = self.combinations[0]
second_shp_name = self.combinations[1]
third_shp_name = self.combinations[2]
if idx_shp is None:
first_shp_file = getattr(self, f'{first_shp_name}_shape')['shapefile']
first_feature = getattr(self, f'{first_shp_name}_shape')['feature']
else:
first_shp_file = idx_shp['shapefile']
first_feature = idx_shp['feature']
second_shp_file = getattr(self, f'{second_shp_name}_shape')['shapefile']
second_feature = getattr(self, f'{second_shp_name}_shape')['feature']
third_shp_file = getattr(self, f'{third_shp_name}_shape')['shapefile']
third_feature = getattr(self, f'{third_shp_name}_shape')['feature']
first_shp_reader = shapefile.Reader(first_shp_file)
second_shp_reader = shapefile.Reader(second_shp_file)
third_shp_reader = shapefile.Reader(third_shp_file)
_, first_shp_geom_list = get_areas_geoms(first_shp_reader)
_, second_shp_geom_list = get_areas_geoms(second_shp_reader)
_, third_shp_geom_list = get_areas_geoms(third_shp_reader)
if 'sub' not in self.hru_definition: # todo
second_shp_geom_list = check_shp_validity(second_shp_geom_list, len(second_shp_reader.shapes()),
name=second_shp_name,
verbosity=self.verbosity)
self.tot_cat_area = get_total_area(first_shp_geom_list)
else:
self.tot_cat_area = get_total_area(second_shp_geom_list)
for s in range(len(third_shp_reader.shapes())):
for j in range(len(second_shp_reader.shapes())):
for lu in range(len(first_shp_reader.shapes())):
intersection = second_shp_geom_list[j].intersection(first_shp_geom_list[lu])
sub = third_shp_geom_list[s]
intersection = sub.intersection(intersection)
sub_code = f'_{third_shp_name}_' + str(find_records(third_shp_file, third_feature, s))
lu_code = find_records(first_shp_file, first_feature, lu)
soil_code = find_records(second_shp_file, second_feature, j)
code = str(year) + sub_code + f'_{second_shp_name}_' + str(soil_code) + f'_{first_shp_name}_' + lu_code
self.hru_geoms[code] = [intersection, second_shp_geom_list[j], first_shp_geom_list[lu]]
hru_paras[code] = {'yearless_key': code[4:]}
self._foo(code, intersection)
# if sum(LuAreaListAcre) > sum(SubAreaListAcre):
# print('Land use area is bigger than subbasin area')
# IntArea_ = [[None] * no_of_subs for _ in range(no_of_lus)]
# IntLuArea = [None] * no_of_lus
# for lu in range(no_of_lus):
# # Int = 0
# IntArea = 0
# for sub_ind in range(no_of_subs):
# Int = LuShpGeomListNew[lu].intersection(SubShpGeomList[sub_ind])
# code, lu_code = get_code(year=year, lu_feature=LuShpGeomListNew[lu], lu_feat_ind=lu,
# sub_feat=SubShpGeomList[sub_ind], sub_feat_ind=sub_ind, _hru_def=_hru_def)
# IntArea += Int.area * M2ToAcre
# anarea = Int.area * M2ToAcre
# IntArea_[lu][sub_ind] = anarea
# print('area of {0} is reduced from {1:10.3f} acres to {2:10.3f}'
# .format(lu_code, LuAreaListAcre[lu], IntArea)) # lu_code/j
# IntLuArea[lu] = IntArea
# print('New area of all landuses is {}'.format(sum(IntLuArea)))
# else:
# print('Land use area is equal to subbasin area')
self.hru_names = list(set(self.hru_names))
return hru_paras, self.hru_geoms
def _foo(self, code, intersection):
hru_name = code[5:]
year = code[0:4]
row_index = pd.to_datetime(year + '0131', format='%Y%m%d', errors='ignore')
self.hru_names.append(hru_name)
self.all_hrus.append(code)
anarea = intersection.area * M2ToAcre
# saving value of area for current HRU and current year in the yearly dataframes
self.area.loc[row_index, hru_name] = anarea
self.area_frac_cat.loc[row_index, hru_name] = anarea / self.tot_cat_area
return
def plot_hrus(self, year, bbox, _polygon_dict, annotate=False, nrows=3,
ncols=4, name='',
annotate_missing_hru=False)->plt.Figure:
polygon_dict = OrderedDict()
for k, v in _polygon_dict.items():
if str(year) in k[0:4]:
polygon_dict[k] = v
# sorting dictionary based on keys so that it puts same HRU at same place for each year
polygon_dict = get_sorted_dict(polygon_dict)
figure, axs = plt.subplots(nrows, ncols)
if isinstance(bbox, str):
r = shapefile.Reader(bbox)
bbox = r.bbox
r.close()
figure.set_figwidth(27)
figure.set_figheight(12)
axis_l = [item for sublist in list(axs) for item in sublist]
# max_bbox = get_bbox_with_max_area(_polygon_dict)
i = 0
for key, axis in zip(polygon_dict, axis_l):
i += 1
ob = polygon_dict[key][0]
# text0 = key.split('_')[4]+' in '+key.split('_')[1] +' '+ key.split('_')[2]
if ob.type == 'MultiPolygon':
anfang_x = [None] * len(ob)
for s_ob in range(len(ob)):
ob_ = ob[s_ob]
x, y = ob_.exterior.xy
axis.plot(x, y, color=np.random.rand(3, ), alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
axis.get_yaxis().set_visible(False)
axis.get_xaxis().set_visible(False)
if bbox is not None:
axis.set_ylim([bbox[1], bbox[3]])
axis.set_xlim([bbox[0], bbox[2]])
anfang_x[s_ob] = x[0]
if annotate:
axis.annotate(key[3:], xy=(0.2, 0.1), xycoords='axes fraction', fontsize=18)
elif ob.type == 'Polygon':
x, y = polygon_dict[key][0].exterior.xy
axis.plot(x, y, color=np.random.rand(3, ), alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
axis.get_yaxis().set_visible(False)
axis.get_xaxis().set_visible(False)
if annotate:
axis.annotate(key[3:], xy=(0.2, 0.1), xycoords='axes fraction', fontsize=18)
if bbox is not None:
axis.set_ylim([bbox[1], bbox[3]])
axis.set_xlim([bbox[0], bbox[2]])
# axis.text(x[0], np.mean(y), text0, color='red', fontsize='12')
else: # for empty cases
x, y = np.arange(0, 10), np.arange(0, 10)
axis.plot(x, y, color='w')
text1 = 'no ' + key.split('_')[1] + ' in sub-basin ' + key.split('_')[2]
if annotate_missing_hru:
axis.text(x[0], np.mean(y), text1, color='red', fontsize=16)
axis.get_yaxis().set_visible(False)
axis.get_xaxis().set_visible(False)
figure.suptitle('HRUs for year {}'.format(year), fontsize=22)
# plt.title('HRUs for year {}'.format(year), fontsize=22)
if self.save:
name = 'hrus_{}.png'.format(year) if name is None else name + str(year)
plt.savefig(name, bbox_inches="tight")
if self.show:
plt.show()
return figure
def plot_as_ts(
self,
marker='o',
ms=8,
min_xticks=None, max_xticks=None,
figsize=None,
**kwargs)->plt.Axes:
"""
parameters
----------
marker :
ms :
marker size as integer
min_xticks : int
minimum ticks on x-axis
max_xticks : int
maximum ticks on x-axis
figsize :
figure size
**kwargs
any keyword arguments for easy_mpl.plot
Examples
--------
>>> hru_object.plot_as_ts()
"""
figsize = figsize or (12, 6)
legend_kws = dict(fontsize=14, markerscale=2, bbox_to_anchor=(1.1, 0.99))
ax_kws = dict(
xlabel="Time", xlabel_kws=dict(fontsize=18),
ylabel="Area (Acres)", ylabel_kws=dict(fontsize=18),
legend_kws=legend_kws,
title = 'Variation of Area (acre) of HRUs with time'
)
plt.close('all')
_, axis = plt.subplots(figsize=figsize)
for area in self.area:
easy_mpl.plot(self.area[area], marker=marker,
mfc='white', ms=ms, lw=4,
label=area,
ax=axis, ax_kws=ax_kws, show=False, **kwargs)
axis.tick_params(color='lightgrey', labelsize=14, labelcolor='grey')
axis.grid(ls='--', color='lightgrey')
if min_xticks is not None:
assert isinstance(min_xticks, int)
assert isinstance(max_xticks, int)
loc = mdates.AutoDateLocator(minticks=min_xticks, maxticks=max_xticks)
axis.xaxis.set_major_locator(loc)
fmt = mdates.AutoDateFormatter(loc)
axis.xaxis.set_major_formatter(fmt)
if self.save:
plt.savefig(f'{self.hru_definition}_hru_as_ts.png', dpi=300, bbox_inches="tight")
if self.show:
plt.show()
return axis
def plot_hru_evolution(self, hru_name, make_gif=False):
"""
plots how the hru evolved during the years
Parameters
----------
hru_name : str,
name of hru to be plotted
make_gif : bool
if True, a gif file will be created from evolution plots
Returns
-------
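Examples
--------
(illustrative sketch, not part of the original source; assumes HRUs were already
created via ``call``)
>>> hru_object.plot_hru_evolution(hru_object.hru_names[0], make_gif=False)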
"""
for yr in self.index.keys():
y = str(yr)[2:] + '_'
hru_name_year = y + hru_name # hru name with year
self.plot_hru(hru_name_year, self.soil_shape)
if make_gif:
gif = GifUtil(folder=os.path.join(os.getcwd(), 'plots'), contains=hru_name)
gif.make_gif()
gif.remove_images()
return
def make_gif(self):
gif = GifUtil(initials=self.hru_definition, folder=os.path.join(os.getcwd(), 'plots'))
gif.make_gif()
gif.remove_images()
return
def plot(self, what:str, index=None, show_all_together=True):
assert what in ['landuse', 'soil', 'subbasins', 'slope']
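# illustrative usage (not in the original source):
# hru_object.plot('landuse', index=2011)  # plot the land use shapefile for 2011
# hru_object.plot('soil')                 # plot the static soil shapefile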
if what == 'landuse':
assert index
shp_file = self.index[index]
else:
shp_file = getattr(self, f'{what}_shape')
return plot_shapefile(shp_file, show_all_together)
def plot_hru(self, hru_name, bbox=None)->plt.Figure:
"""
plot only one hru from `hru_geoms`.
The value of each key in hru_geoms is a list with three shapes
Examples
--------
>>> hru_object.plot_hru(list(hru_object.hru_geoms.keys())[0])
"""
shape_list = self.hru_geoms[hru_name]
figure, (axis_list) = plt.subplots(3)
figure.set_figheight(14)
if bbox:
r = shapefile.Reader(bbox)
bbox = r.bbox
i = 0
leg = hru_name
for axs, ob in zip(axis_list, shape_list):
i +=1
if i==2: leg = hru_name.split('_')[1:2]
elif i==3: leg = hru_name.split('_')[-1]
if ob.type == 'MultiPolygon':
for s_ob in range(len(ob)):
ob_ = ob[s_ob]
x, y = ob_.exterior.xy
axs.plot(x, y, color=COLORS[i], alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
elif ob.type == 'Polygon':
x, y = ob.exterior.xy
axs.plot(x, y, color=COLORS[i], alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
axs.set_title(leg, fontsize=14)
if bbox:
axs.set_ylim([bbox[1], bbox[3]])
axs.set_xlim([bbox[0], bbox[2]])
if self.save:
plt.savefig(hru_name + '.png')
if self.show:
plt.show()
return figure
def draw_pie(
self,
year:int,
n_merge:int=0,
shadow:bool = True,
title:bool=False,
**kwargs
)->tuple:
"""
todo draw nested pie chart for all years
https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.pie.html
Draws a pie chart showing relative area of HRUs for a particular year.
Since the hrus can change with time, selecting one year is based on supposition
that area of hrus remain constant during the whole year.
Parameters
----------
year : int,
the year for which area of hrus will be used.
n_merge :
number of hrus to merge
shadow : bool
title :
**kwargs :
any keyword arguments for `easy_mpl.pie`
Returns
--------
tuple
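Examples
--------
(illustrative sketch, not part of the original source; assumes HRUs were already
created via ``call``)
>>> hru_object.draw_pie(2011, n_merge=4, shadow=False)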
"""
idx = str(year) + '-01-31'
area_unsort = self.area.loc[idx]
area = area_unsort.sort_values()
merged = area[area.index[0]:area.index[n_merge-1]]
rest = area[area.index[n_merge]:]
if n_merge==0:
assert len(merged) == len(area)
merged_vals= []
merged_labels = []
else:
merged_vals = [sum(merged.values)]
merged_labels = ['{} HRUs'.format(n_merge)]
vals = list(rest.values) + merged_vals
labels = list(rest.keys()) + merged_labels
explode = [0 for _ in range(len(vals))]
explode[-1] = 0.1
labels_n = []
for l in labels:
labels_n.append(l.replace('lu_', ''))
if title:
title = 'Areas of HRUs for year {}'.format(year)
outs = easy_mpl.pie(fractions=vals,
labels=labels_n,
explode=tuple(explode),
shadow=shadow,
ax_kws=dict(title=title), show=False,
**kwargs)
name = f'{len(self.hru_names)}hrus_for_{year}_{self.hru_definition}.png'
if self.save:
plt.savefig(name, dpi=300, bbox_inches="tight")
if self.show:
plt.show()
return outs
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/spatial_processing.py | spatial_processing.py |
from typing import Union
from ai4water.backend import imputations, np, pd, plt
# use LGBM imputation method
# https://www.kaggle.com/robikscube/handling-with-missing-data-youtube-stream#Level-4:-LightGBM-Imputer!!
# plot imputation distribution
class Imputation(object):
"""
Implements imputation of missing values using a range of methods.
Imputation Methods
-----------------
- pandas:
Pandas library provides two methods for filling input data.
`interpolate`: filling by interpolation
Example of imputer_args can be
{'method': 'spline': 'order': 2}
For detailed args to be passed see interpolate_
`fillna`:
example of imputer_args can be
{'method': 'ffill'}
For detailed args to be passed see fillna_
- sklearn:
scikit-learn library provides 3 different imputation methods.
`SimpleImputer`:
For details see SimpleImputer_
`IterativeImputer`:
imputer_args example: {'n_nearest_features': 2}
For details see IterativeImputer_
`KNNIMputer`:
All the args accepted by KNNImputer of sklearn can be passed in imputer_args.
imputer_args example: {'n_neighbors': 3}.
For details KNNImputer_
- fancyimpute:
`KNN`
`NuclearNormMinimization`
`SoftImpute`
`BiScaler`
transdim:
Methods
--------
- :py:meth:`ai4water.preprocessing.imputation.Imputation.plot` plots the imputed values.
- :py:meth:`ai4water.preprocessing.imputation.Imputation.missing_indices` indices of missing data.
Examples:
>>> import pandas as pd
>>> import numpy as np
>>> from ai4water.preprocessing import Imputation
>>> df = pd.DataFrame([1,3,np.nan, np.nan, 9, np.nan, 11])
>>> imputer = Imputation(df, method='fillna', imputer_args={'method': 'ffill'})
>>> imputer()
# change the imputation method
>>> imputer.method = 'interpolate'
>>> imputer(method='cubic')
# Now try with KNN imputation
>>> imputer.method = 'KNNImputer'
>>> imputer(n_neighbors=3)
.. _fillna:
https://pandas.pydata.org/pandas-docs/version/0.22.0/generated/pandas.DataFrame.fillna.html
.. _interpolate:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.interpolate.html
.. _SimpleImputer:
https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html#sklearn.impute.SimpleImputer
.. _IterativeImputer:
https://scikit-learn.org/stable/modules/generated/sklearn.impute.IterativeImputer.html#sklearn.impute.IterativeImputer
.. _KNNImputer:
https://scikit-learn.org/stable/modules/generated/sklearn.impute.KNNImputer.html
"""
def __init__(
self,
data: Union[pd.DataFrame, np.ndarray, list],
method: str = 'KNNImputer',
features=None,
imputer_args: dict = None
):
"""
Arguments:
data:
the data which contains missing values
method:
the method to apply for missing
features:
the features on which imputation is to be applied
imputer_args:
arguments for underlying imputer function
"""
self.data = self.maybe_make_df(data)
self.method = method
self.features = features or self.data.columns
self.imputer_args = {} if imputer_args is None else imputer_args
self.new_data = None
@property
def method(self):
return self._method
@method.setter
def method(self, x):
self._method = x
def call(self, *args, **kwargs):
raise NotImplementedError(f"You must overwrite the `call` method to implement the {self.method} method")
def __call__(self, *args, **kwargs):
"""
If kwargs are provided, they will overwrite self.imputer_args. This allows the same
instance of the Imputation class to be used with different args.
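Example (illustrative sketch, not part of the original source; follows the class
docstring above):
>>> imputer(n_neighbors=5)  # overrides imputer_args for this call only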
"""
if not kwargs:
kwargs = self.imputer_args
if self.method in ['fillna', 'interpolate']: # it is a pandas based
for col in self.data.columns:
if col in self.features:
self.data[col] = getattr(self.data[col], self.method)(**kwargs)
elif self.method in imputations:
imputer = imputations[self.method](**kwargs)
data = self.data.copy() # making a copy so that non-imputed features remain intact
_data = self.data[self.features].values
data_ = imputer.fit_transform(_data)
if isinstance(data_, np.ndarray):
data_ = pd.DataFrame(data_, columns=self.features, index=self.data.index)
data[self.features] = data_
setattr(self, 'data', data)
else:
return self.call()
if self._dtype == 'list':
self.data = self.data.values.reshape(-1,).tolist()
elif self._dtype == 'ndarray':
self.data = self.data.values
return self.data
def plot(self, cols=None, st=0, en=None):
"""
cols: columns to plot from data
st: int
en: int
Example
-------
>>> imputer.plot(cols=['in1', 'in2'], st=0, en=25)
"""
if cols is not None:
if not isinstance(cols, list):
assert isinstance(cols, str) and cols in self.data
cols = [cols]
else:
cols = list(self.new_data.columns)
if en is None:
en = len(self.data)
plt.close('all')
_, axis = plt.subplots(len(cols), sharex='all')
if not isinstance(axis, np.ndarray):
axis = [axis]
indices = self.missing_indices()
for col, ax in zip(cols, axis):
idx = indices[col]
ax.plot(self.data[col][st:en], linestyle='-', color='k', marker='o', fillstyle='full', label="Original")
ax.plot(self.new_data[col][idx][st:en], linestyle='--', marker='*', color='aqua', label="Imputed")
ax.set_title(col)
ax.legend()
plt.show()
return
def missing_indices(self) -> dict:
# https://github.com/scikit-learn/scikit-learn/blob/7cc3dbcbe/sklearn/impute/_base.py#L556
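# illustrative (not in the original source): for a single column 'data0' holding
# [1.0, nan, 3.0], this returns {'data0': array([False, True, False])}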
indices = {}
for col in self.data.columns:
# https://stackoverflow.com/a/42795371/5982232
indices[col] = np.isnan(self.data[col].values.astype(float))
return indices
def maybe_make_df(self, data):
setattr(self, '_dtype', data.__class__.__name__)
data = data.copy()
if isinstance(data, pd.DataFrame):
data = data
else:
data = np.array(data)
if data.ndim == 1:
data = data.reshape(-1, 1)
assert isinstance(data, np.ndarray)
data = pd.DataFrame(data, columns=['data'+str(i) for i in range(data.shape[1])])
return data
| AI4Water | /AI4Water-1.6.tar.gz/AI4Water-1.6/ai4water/preprocessing/imputation.py | imputation.py |