prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
---|---|---|
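Each record below is a flattened row of this table: the prompt is a Python snippet whose last line ends with `| <completion> | <api> |`, giving the masked call and the fully qualified pandas API it uses. As a minimal sketch (assuming neither trailing field contains a `|` of its own), one row can be split back into its three columns like this:

```python
def split_record(raw: str):
    """Split one flattened row into (prompt, completion, api).

    Assumes the row ends with '| <completion> | <api> |' and that neither
    trailing field contains a '|' of its own.
    """
    body = raw.rstrip()
    if not body.endswith("|"):
        return body, None, None
    body, _, api = body[:-1].rpartition("|")
    body, _, completion = body.rstrip().rpartition("|")
    return body.rstrip(), completion.strip(), api.strip()


# Toy row in the same format as the records below:
row = 'x = | pd.DataFrame() | pandas.DataFrame |'
print(split_record(row))
# ('x =', 'pd.DataFrame()', 'pandas.DataFrame')
```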
import pandas as pd
from sklearn.model_selection import train_test_split
# load data sets
# 3' utrs composition
utrs = pd.read_csv("../../data/19-01-17-Get-ORFS-UTRS-codon-composition/sequence-data/zfish_3utr6mer_composition.csv")
utrs = utrs.rename(columns={'ensembl_gene_id': 'Gene_ID'}).drop('3utr', axis=1)
# optimality
pls = | pd.read_csv("../19-02-24-OverlapPathwaysFig3/results_data/regulatory_pathways_matrix.csv") | pandas.read_csv |
import pandas as pd
from fairlens.metrics.correlation import distance_cn_correlation, distance_nn_correlation
from fairlens.sensitive.correlation import find_column_correlation, find_sensitive_correlations
pair_race = "race", "Ethnicity"
pair_age = "age", "Age"
pair_marital = "marital", "Family Status"
pair_gender = "gender", "Gender"
pair_nationality = "nationality", "Nationality"
def test_correlation():
col_names = ["gender", "random", "score"]
data = [
["male", 10, 60],
["female", 10, 80],
["male", 10, 60],
["female", 10, 80],
["male", 9, 59],
["female", 11, 80],
["male", 12, 61],
["female", 10, 83],
]
df = pd.DataFrame(data, columns=col_names)
res = {"score": [pair_gender]}
assert find_sensitive_correlations(df) == res
def test_double_correlation():
col_names = ["gender", "nationality", "random", "corr1", "corr2"]
data = [
["woman", "spanish", 715, 10, 20],
["man", "spanish", 1008, 20, 20],
["man", "french", 932, 20, 10],
["woman", "french", 1300, 10, 10],
]
df = pd.DataFrame(data, columns=col_names)
res = {"corr1": [pair_gender], "corr2": [pair_nationality]}
assert find_sensitive_correlations(df) == res
def test_multiple_correlation():
col_names = ["race", "age", "score", "entries", "marital", "credit", "corr1"]
data = [
["arabian", 21, 10, 2000, "married", 10, 60],
["carribean", 20, 10, 3000, "single", 10, 90],
["indo-european", 41, 10, 1900, "widowed", 10, 120],
["carribean", 40, 10, 2000, "single", 10, 90],
["indo-european", 42, 10, 2500, "widowed", 10, 120],
["arabian", 19, 10, 2200, "married", 10, 60],
]
df = pd.DataFrame(data, columns=col_names)
res = {"corr1": [pair_race, pair_marital]}
assert find_sensitive_correlations(df, corr_cutoff=0.9) == res
def test_common_correlation():
col_names = ["race", "age", "score", "entries", "marital", "credit", "corr1", "corr2"]
data = [
["arabian", 21, 10, 2000, "married", 10, 60, 120],
["carribean", 20, 10, 3000, "single", 10, 90, 130],
["indo-european", 41, 10, 1900, "widowed", 10, 120, 210],
["carribean", 40, 10, 2000, "single", 10, 90, 220],
["indo-european", 42, 10, 2500, "widowed", 10, 120, 200],
["arabian", 19, 10, 2200, "married", 10, 60, 115],
]
df = pd.DataFrame(data, columns=col_names)
res = {
"corr1": [pair_race, pair_age, pair_marital],
"corr2": [pair_age],
}
assert find_sensitive_correlations(df) == res
def test_column_correlation():
col_names = ["gender", "nationality", "random", "corr1", "corr2"]
data = [
["woman", "spanish", 715, 10, 20],
["man", "spanish", 1008, 20, 20],
["man", "french", 932, 20, 10],
["woman", "french", 1300, 10, 10],
]
df = pd.DataFrame(data, columns=col_names)
res1 = [pair_gender]
res2 = [pair_nationality]
assert find_column_correlation("corr1", df) == res1
assert find_column_correlation("corr2", df) == res2
def test_series_correlation():
col_names = ["race", "age", "score", "entries", "marital", "credit"]
data = [
["arabian", 21, 10, 2000, "married", 10],
["carribean", 20, 10, 3000, "single", 10],
["indo-european", 41, 10, 1900, "widowed", 10],
["carribean", 40, 10, 2000, "single", 10],
["indo-european", 42, 10, 2500, "widowed", 10],
["arabian", 19, 10, 2200, "married", 10],
]
df = pd.DataFrame(data, columns=col_names)
s1 = pd.Series([60, 90, 120, 90, 120, 60])
s2 = pd.Series([120, 130, 210, 220, 200, 115])
res1 = [pair_race, pair_marital]
res2 = [pair_age]
assert set(find_column_correlation(s1, df, corr_cutoff=0.9)) == set(res1)
assert set(find_column_correlation(s2, df, corr_cutoff=0.9)) == set(res2)
def test_basic_nn_distance_corr():
sr_a = pd.Series([10.0, 20.0, 30.0, 40.0, 50.0, 60.0])
sr_b = pd.Series([30.0, 10.0, 20.0, 60.0, 50.0, 40.0])
assert distance_nn_correlation(sr_a, sr_b) > 0.75
def test_cn_basic_distance_corr():
sr_a = pd.Series(["A", "B", "A", "A", "B", "B"])
sr_b = | pd.Series([15, 45, 14, 16, 44, 46]) | pandas.Series |
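The two distance-correlation tests above only assert that the scores clear a threshold. As a rough illustration of the kind of categorical-vs-numeric association being measured, here is a standalone correlation-ratio sketch; it is a stand-in metric of my own, not fairlens's `distance_cn_correlation`:

```python
import numpy as np
import pandas as pd


def correlation_ratio(categorical: pd.Series, numeric: pd.Series) -> float:
    """Eta: share of the numeric variance explained by the category means.
    Illustrative stand-in only -- not fairlens's distance_cn_correlation."""
    grand_mean = numeric.mean()
    between = sum(len(g) * (g.mean() - grand_mean) ** 2
                  for _, g in numeric.groupby(categorical))
    total = ((numeric - grand_mean) ** 2).sum()
    return float(np.sqrt(between / total)) if total > 0 else 0.0


sr_a = pd.Series(["A", "B", "A", "A", "B", "B"])
sr_b = pd.Series([15, 45, 14, 16, 44, 46])
print(correlation_ratio(sr_a, sr_b))  # close to 1: the category strongly predicts the values
```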
import sys
import argparse
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
import pandas as pd
from scipy import interpolate
from scipy.spatial import Delaunay
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from ATE import UniformSamplingStrategy, Domain, Samplerun
from ..data_utils import load_batches, encode_data_frame, x_y_split, c_d_y_split, c_d_split
from ..plot_utils import set_plotting_style
from ..plot_reg_performance import plot_reg_performance
from ..model_loader import get_model_factory, load_model_from_file
from ..metric_loader import get_metric_factory
from tbr_reg.endpoints.training import train, test, plot, plot_results, get_metrics
def main():
'''
Perform quality-adaptive sampling algorithm
'''
# Parse inputs and store in relevant variables.
args = input_parse()
init_samples = args.init_samples
step_samples = args.step_samples
step_candidates = args.step_candidates
d_params = disctrans(args.disc_fix)
# Collect surrogate model type and theory under study.
thismodel = get_model_factory()[args.model](cli_args=sys.argv[7:])
thistheory = globals()["theory_" + args.theory]
domain = Domain()
if args.saved_init:
# load data as initial evaluated samples
df = load_batches(args.saved_init, (0, 1 + int(init_samples/1000)))
X_init, d, y_multiple = c_d_y_split(df.iloc[0:init_samples])
d_params = d.values[0]
print(d.values[0][0])
y_init = y_multiple['tbr']
domain.fix_param(domain.params[1], d_params[0])
domain.fix_param(domain.params[2], d_params[1])
domain.fix_param(domain.params[3], d_params[2])
domain.fix_param(domain.params[5], d_params[3])
domain.fix_param(domain.params[6], d_params[4])
domain.fix_param(domain.params[7], d_params[5])
domain.fix_param(domain.params[8], d_params[6])
if not args.saved_init:
# generate initial parameters
sampling_strategy = UniformSamplingStrategy()
c = domain.gen_data_frame(sampling_strategy, init_samples)
print(c.columns)
# evaluate initial parameters in given theory
print("Evaluating initial " + str(init_samples) + " samples in " + args.theory + " theory.")
output = thistheory(params = c, domain = domain, n_samples = init_samples)
X_init, d, y_multiple = c_d_y_split(output)
y_init = y_multiple['tbr']
current_samples, current_tbr = X_init, y_init
# MAIN QASS LOOP
complete_condition = False
iter_count = 0
err_target = 0.0001
max_iter_count = 10000
all_metrics = | pd.DataFrame() | pandas.DataFrame |
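The record above is cut off just before the main QASS loop body. For orientation, a generic, self-contained sketch of the quality-adaptive pattern it sets up (fit a surrogate, score candidates by uncertainty, evaluate the most informative ones) might look like the following; the function names and the random-forest surrogate are illustrative, not the project's actual code:

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor


def adaptive_sampling_loop(evaluate, x_init, y_init, n_iterations=10,
                           n_candidates=1000, n_new_per_step=10, seed=0):
    """Generic quality-adaptive sampling skeleton (illustrative only):
    fit a surrogate on the evaluated samples, score a random candidate pool
    by predictive uncertainty, then evaluate the most uncertain candidates
    with the expensive model `evaluate`."""
    rng = np.random.default_rng(seed)
    X, y = np.asarray(x_init, dtype=float), np.asarray(y_init, dtype=float)
    for _ in range(n_iterations):
        surrogate = RandomForestRegressor(n_estimators=50, random_state=seed).fit(X, y)
        candidates = rng.uniform(X.min(axis=0), X.max(axis=0),
                                 size=(n_candidates, X.shape[1]))
        per_tree = np.stack([t.predict(candidates) for t in surrogate.estimators_])
        uncertainty = per_tree.std(axis=0)          # disagreement across trees
        picks = candidates[np.argsort(uncertainty)[-n_new_per_step:]]
        X = np.vstack([X, picks])
        y = np.concatenate([y, evaluate(picks)])
    return X, y
```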
###################
#
# File handling the processing of CSVs and calculations for
# the Naive Bayes probability
#
# NOTES:
# 1- Need to loop over the historical files
# 2- Need to create accessible function from outside to get:
# a- begin and end lon/lats
# b- sigmoid and 1-5 mapping
# c- Count events in state
#
####################
import pandas as pd
import numpy as np
import os
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn import metrics
states = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
"HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
"SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
#predictor
global mnb
mnb = MultinomialNB()
# empty death and injury dataframe
global di_data
di_data = pd.DataFrame(columns=['STATE','COUNT','NUM_EVENTS', 'MONTH_NAME', 'CLASS'])
# empty location dataframe
global loc_data
loc_data = pd.DataFrame(columns=['STATE','MONTH_NAME','INJURIES_DIRECT','INJURIES_INDIRECT','DEATHS_DIRECT','DEATHS_INDIRECT','BEGIN_LAT','BEGIN_LON','END_LAT','END_LON'])
global state_events
state_events = {}
global state_names
state_names={'ALABAMA':0,
'ALASKA':1,
'ARIZONA':2,
'ARKANSAS':3,
'CALIFORNIA':4,
'COLORADO':5,
'CONNECTICUT':6,
'DELAWARE':7,
'FLORIDA':8,
'GEORGIA':9,
'HAWAII':10,
'IDAHO':11,
'ILLINOIS':12,
'INDIANA':13,
'IOWA':14,
'KANSAS':15,
'KENTUCKY':16,
'LOUISIANA':17,
'MAINE':18,
'MARYLAND':19,
'MASSACHUSETTS':20,
'MICHIGAN':21,
'MINNESOTA':22,
'MISSISSIPPI':23,
'MISSOURI':24,
'MONTANA':26,
'NEBRASKA':27,
'NEVADA':28,
'NEW HAMPSHIRE':29,
'NEW JERSEY':30,
'NEW MEXICO':31,
'NEW YORK':32,
'NORTH CAROLINA':33,
'NORTH DAKOTA':34,
'OHIO':35,
'OKLAHOMA':36,
'OREGON':37,
'PENNSYLVANIA':38,
'RHODE ISLAND':39,
'SOUTH CAROLINA':40,
'SOUTH DAKOTA':41,
'TENNESSEE':42,
'TEXAS':43,
'UTAH':44,
'VERMONT':45,
'VIRGINIA':46,
'WASHINGTON':47,
'WEST VIRGINIA':48,
'WISCONSIN':49,
'WYOMING':50,
'DISTRICT OF COLUMBIA':51,
'PUERTO RICO':52,}
#map of month names to month numbers
global months
months = {"January":1,"February":2,"March":3,"April":4,"May":5,"June":6,"July":7,"August":8,"September":9,"October":10,"November":11,"December":12}
#################
### FUNCTIONS ###
#################
#squash the output between 0-1
def sigmoid(x):
#g=getGlobalRating()
g=1
return g/(x + g)
# get the danger index in our 1-5 range
def dangerrate(x):
x = sigmoid(x)
if x>=0.8:
return 1
elif x<0.8 and x>=0.6:
return 2
elif x<0.6 and x>=0.4:
return 3
elif x<0.4 and x>=0.2:
return 4
elif x<0.2:
return 5
# Get the global average (sum of averages)
def getGlobalRating():
#return the number of deaths and incidents over the number of events
return di_data['COUNT'].sum() / di_data['NUM_EVENTS'].sum()
def getStateRating(state):
state_ave = 0
# if we have data for that state, grabb it
if state in di_data['STATE'].unique():
state_ave = di_data.loc[di_data['STATE'] == state,'COUNT'].values[0] /di_data.loc[di_data['STATE'] == state,'NUM_EVENTS'].values[0]
else:
return -1
return dangerrate(state_ave)
#return the state events with locations in a dictionary
def getStateEvents(state="MD"):
input1 = states.index(state)
#check if we have data for that state
if input1 in di_data['STATE'].unique():
return di_data.loc[di_data['STATE'] == input1].to_dict()
else:
return {}
#for some input get prediction
def getPrediction(state="MD", month=1):
input1 = [states.index(state),month]
return mnb.predict([input1])[0]
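A hedged usage sketch of the prediction helper; it assumes `mnb` has already been fitted by the training code further down in this script:

```python
# state abbreviation + month number -> predicted danger class (1-5)
predicted_class = getPrediction(state="TX", month=6)
print("Predicted danger class for Texas in June:", predicted_class)
```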
##############################################################################
print("""
Beginning to parse files from the Datasets directory.
Please Be Patient while we produce a model.
""")
# Main portion that takes care of data loading and processing from the historical files
#loop between our historical data files
for datafile in os.listdir(os.fsencode("Datasets")):
print("Reading file: ", datafile.decode('utf-8'))
# read the current file into datagram
data = pd.read_csv("Datasets/"+datafile.decode('utf-8'))
#clean the column names
data.columns = [x.strip() for x in data.columns]
#subset the data into our desired working data
data = data[['STATE','MONTH_NAME','INJURIES_DIRECT','INJURIES_INDIRECT','DEATHS_DIRECT','DEATHS_INDIRECT','BEGIN_LAT','BEGIN_LON','END_LAT','END_LON']]
# grabbing the location data of the file
loc_data = loc_data.append(data[['STATE','MONTH_NAME','INJURIES_DIRECT','INJURIES_INDIRECT','DEATHS_DIRECT','DEATHS_INDIRECT','BEGIN_LAT','BEGIN_LON','END_LAT','END_LON']],ignore_index=True)
# remove cached file for space reasons
del data
print("Cleaning up the data.")
# clean the NaNs
loc_data['END_LON'].fillna(0.000, inplace=True)
loc_data['END_LAT'].fillna(0.000, inplace=True)
loc_data['BEGIN_LON'].fillna(0.000, inplace=True)
loc_data['BEGIN_LAT'].fillna(0.000, inplace=True)
print("Obtaining number of events per state.")
#get the number of events for each state
for st in loc_data['STATE'].unique():
state_events[st] = loc_data.loc[loc_data['STATE']== st].shape[0]
print("Manipulating data into our dataframes.")
# grabbing the event data per state
for curr_row in loc_data.iterrows():
# iterrows() yields (index, Series); keep only the row Series
curr_row = curr_row[1]
#number of injuries and deaths in that state
total = curr_row['DEATHS_DIRECT'] + curr_row['DEATHS_INDIRECT'] + curr_row['INJURIES_DIRECT'] + curr_row['INJURIES_INDIRECT']
#total = curr_row[4] + curr_row[5] + curr_row[2] + curr_row[3]
#number of events in that state
num_event = state_events[curr_row['STATE']]
#num_event = state_events[curr_row[0]]
#get month name
mon = months[curr_row['MONTH_NAME']]
#mon = months[curr_row[1]]
#get danger class
rclass = dangerrate(total/num_event)
#grab lons and lats
blat = curr_row['BEGIN_LAT']
#blat = curr_row[6]
blon = curr_row['BEGIN_LON']
#eblon = curr_row[7]
elat = curr_row['END_LAT']
#elat = curr_row[8]
elon = curr_row['END_LON']
#elon = curr_row[9]
st = state_names[curr_row['STATE']]
#st = state_names[curr_row[0]]
# temporary dataframe to hold extracted data
t = | pd.DataFrame([[st, total, num_event, mon, blat, blon, elat, elon, rclass]], columns=['STATE','COUNT','NUM_EVENTS', 'MONTH_NAME','BEGIN_LAT','BEGIN_LON','END_LAT','END_LON', 'CLASS']) | pandas.DataFrame |
__version__ = '0.1.3'
__maintainer__ = '<NAME>'
__contributors__ = '<NAME>, <NAME>'
__email__ = '<EMAIL>'
__birthdate__ = '31.12.2019'
__status__ = 'prod' # options are: dev, test, prod
__license__ = 'BSD-3-Clause'
import pandas as pd
import yaml
from pathlib import Path
def loadConfigDict(configNames: tuple):
# pathlib syntax for Windows, macOS, Linux compatibility, see https://realpython.com/python-pathlib/ for an intro
"""
Generic function to load and open yaml config files
:param configNames: Tuple containing names of config files to be loaded
:return: Dictionary with opened yaml config files
"""
basePath = Path(__file__).parent.parent / 'config'
configDict = {}
for configName in configNames:
filePath = (basePath / configName).with_suffix('.yaml')
with open(filePath) as ipf:
configDict[configName] = yaml.load(ipf, Loader=yaml.SafeLoader)
return configDict
def createFileString(globalConfig: dict, fileKey: str, datasetID: str=None, manualLabel: str = '',
filetypeStr: str = 'csv'):
"""
Generic method used for fileString compilation throughout the VencoPy framework. This method does not write any
files but just creates the file name including the filetype suffix.
:param globalConfig: global config file for paths
:param fileKey: Manual specification of fileKey
:param datasetID: Manual specification of data set ID e.g. 'MiD17'
:param manualLabel: Optional manual label to add to filename
:param filetypeStr: filetype to be written to hard disk
:return: Full name of file to be written.
"""
if datasetID is None:
return f"{globalConfig['files'][fileKey]}_{globalConfig['labels']['runLabel']}_{manualLabel}.{filetypeStr}"
return f"{globalConfig['files'][datasetID][fileKey]}_{globalConfig['labels']['runLabel']}_{manualLabel}_" \
f"{datasetID}.{filetypeStr}"
def mergeVariables(data, variableData, variables):
"""
Global VencoPy function to merge MiD variables to trip distance, purpose or grid connection data.
:param data: trip diary data as given by tripDiaryBuilder and gridModeler
:param variableData: Survey data that holds specific variables for merge
:param variables: Name of variables that will be merged
:return: The merged data
"""
variableDataUnique = variableData.loc[~variableData['genericID'].duplicated(), :]
variables.append('genericID')
variableDataMerge = variableDataUnique.loc[:, variables].set_index('genericID')
if 'genericID' not in data.index.names:
data.set_index('genericID', inplace=True, drop=True)
mergedData = pd.concat([variableDataMerge, data], axis=1, join='inner')
mergedData.reset_index(inplace=True)
return mergedData
def mergeDataToWeightsAndDays(diaryData, ParseData):
return mergeVariables(data=diaryData, variableData=ParseData.data, variables=['tripStartWeekday', 'tripWeight'])
def calculateWeightedAverage(col, weightCol):
return sum(col * weightCol) / sum(weightCol)
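For example, `calculateWeightedAverage` applied to a toy column and weight column:

```python
import pandas as pd

col = pd.Series([10.0, 20.0, 30.0])
weightCol = pd.Series([1.0, 1.0, 2.0])
print(calculateWeightedAverage(col, weightCol))  # (10 + 20 + 60) / 4 = 22.5
```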
def writeProfilesToCSV(profileDictOut, globalConfig: dict, singleFile=True, datasetID='MiD17'):
"""
Function to write VencoPy profiles to either one or five .csv files in the output folder specified in the global config.
:param globalConfig: global config file for paths
:param profileDictOut: Dictionary with profile names in keys and profiles as pd.Series containing a VencoPy
profile each to be written in value
:param singleFile: If True, all profiles will be appended and written to one .csv file. If False, five files are
written
:param datasetID: Manual specification of data set ID e.g. 'MiD17'
:return: None
"""
if singleFile:
dataOut = | pd.DataFrame(profileDictOut) | pandas.DataFrame |
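The function body above is cut off after building `dataOut`. A minimal sketch of how such a writer could proceed (illustrative only; not necessarily VencoPy's actual implementation, and the file names are made up):

```python
from pathlib import Path

import pandas as pd


def write_profiles(profileDictOut: dict, outputFolder: str, singleFile: bool = True):
    outPath = Path(outputFolder)
    outPath.mkdir(parents=True, exist_ok=True)
    if singleFile:
        # all profiles side by side in one wide CSV
        pd.DataFrame(profileDictOut).to_csv(outPath / "vencopy_profiles.csv")
    else:
        # one CSV per profile
        for name, profile in profileDictOut.items():
            pd.Series(profile).to_csv(outPath / f"vencopy_profile_{name}.csv")
```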
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
import time
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
data = pd.read_csv(filename).dropna()
data = data.drop_duplicates()
# Delete uninteresting features
for feature in ["id", "date", "lat", "long"]:
data = data.drop(feature, axis=1)
# Delete invalid rows
for feature in ["price", "bedrooms", "sqft_living", "sqft_lot", "floors", "yr_built",
"zipcode", "sqft_living15", "sqft_lot15"]:
data = data[data[feature] > 0]
for feature in ["bathrooms", "sqft_basement", "yr_renovated"]:
data = data[data[feature] >= 0]
data = data[data["waterfront"].isin([0,1])]
data = data[data["view"].isin(range(5))]
data = data[data["condition"].isin(range(1,6))]
data = data[data["grade"].isin(range(1,14))]
# One hot vector
data["renovated"] = (data["yr_renovated"] / 1000).astype(int)
data = data.drop("yr_renovated", axis=1)
data["zipcode"] = data["zipcode"].astype(int)
data = pd.get_dummies(data, prefix='zipcode', columns=['zipcode'])
data.insert(loc=0, column="intercept", value=1)
response = data["price"]
data = data.drop("price", axis=1)
return (data, response)
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
"""
Create scatter plot between each feature and the response.
- Plot title specifies feature name
- Plot title specifies Pearson Correlation between feature and response
- Plot saved under given folder with file name including feature name
Parameters
----------
X : DataFrame of shape (n_samples, n_features)
Design matrix of regression problem
y : array-like of shape (n_samples, )
Response vector to evaluate against
output_path: str (default ".")
Path to folder in which plots are saved
"""
X = X.drop("intercept", axis=1)
deviation_y = np.std(y)
for feature in X.columns:
feature_cov = np.cov(X[feature], y) / (np.std(X[feature]) * deviation_y)
feature_pearson = feature_cov[0, 1]
fig1 = px.scatter( | pd.DataFrame({'x': X[feature], 'y': y}) | pandas.DataFrame |
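The Pearson correlation above is assembled by hand from `np.cov` and `np.std`; a quick standalone cross-check against `np.corrcoef` (note the small ddof mismatch between the two estimators):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=100)
y = 2.5 * x + rng.normal(scale=0.5, size=100)

manual = np.cov(x, y)[0, 1] / (np.std(x) * np.std(y))   # np.cov uses ddof=1
direct = np.corrcoef(x, y)[0, 1]                        # internally consistent ddof
print(manual, direct)  # nearly identical for n=100; manual is larger by n/(n-1)
```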
# -*- coding: utf-8 -*-
import datetime
import pandas as pd
from gmsdk import md, to_dict
md.init('13382753152', '940809')
CFFEX = ['IF', 'IH', 'IC', 'T', 'TF']
CZCE = ['CF', 'FG', 'MA', 'RM', 'SR', 'TA', 'ZC']
SHFE = ['AL', 'BU', 'CU', 'HC', 'NI', 'RB', 'RU', 'SN', 'ZN']
DCE = ['C', 'CS', 'I', 'J', 'JD', 'JM', 'L', 'M', 'P', 'PP', 'V', 'Y']
def mtsymbol_list(symbol_list):
    # join the symbols into the comma-separated string expected by the gmsdk API
    return ','.join(symbol_list)
def to_pd(var, index):
ret = []
for i in var:
ret.append(to_dict(i))
ret = | pd.DataFrame(ret) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with | tm.assert_produces_warning(warn, match="concatenating bool-dtype") | pandas._testing.assert_produces_warning |
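This test record is truncated mid-assertion, but the coercion rules it pins down can be shown standalone; a small demo of the same-dtype and mixed-dtype cases (behaviour as of the pandas version these tests target):

```python
import pandas as pd

a = pd.Series(pd.to_datetime(["2011-01-01", "2011-01-02"]))
b = pd.Series(pd.to_datetime(["2011-01-03"]))
print(pd.concat([a, b], ignore_index=True).dtype)   # datetime64[ns]: same dtype is preserved

mixed = pd.concat(
    [a, pd.Series(pd.timedelta_range("1 day", periods=2))], ignore_index=True
)
print(mixed.dtype)  # object: datetime64 + timedelta64 falls back to object dtype
```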
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import euclidean_distances
from mlos.Optimizers.ExperimentDesigner.UtilityFunctionOptimizers.UtilityFunctionOptimizer import UtilityFunctionOptimizer
from mlos.Optimizers.ExperimentDesigner.UtilityFunctions.UtilityFunction import UtilityFunction
from mlos.Optimizers.OptimizationProblem import OptimizationProblem
from mlos.Spaces import ContinuousDimension, DiscreteDimension, Point, SimpleHypergrid
from mlos.Spaces.Configs.ComponentConfigStore import ComponentConfigStore
from mlos.Spaces.HypergridAdapters import DiscreteToUnitContinuousHypergridAdapter
from mlos.Tracer import trace, traced
glow_worm_swarm_optimizer_config_store = ComponentConfigStore(
parameter_space=SimpleHypergrid(
name="glow_worm_swarm_optimizer_config",
dimensions=[
DiscreteDimension(name="num_initial_points_multiplier", min=1, max=10),
DiscreteDimension(name="num_worms", min=10, max=1000),
DiscreteDimension(name="num_iterations", min=1, max=20), # TODO: consider other stopping criteria too
ContinuousDimension(name="luciferin_decay_constant", min=0, max=1),
ContinuousDimension(name="luciferin_enhancement_constant", min=0, max=1),
ContinuousDimension(name="step_size", min=0, max=1), # TODO: make this adaptive
ContinuousDimension(name="initial_decision_radius", min=0, max=1, include_min=False),
ContinuousDimension(name="max_sensory_radius", min=0.5, max=10), # TODO: add constraints
DiscreteDimension(name="desired_num_neighbors", min=1, max=100), # TODO: add constraint to make it smaller than num_worms
ContinuousDimension(name="decision_radius_adjustment_constant", min=0, max=1)
]
),
default=Point(
num_initial_points_multiplier=5,
num_worms=100,
num_iterations=10,
luciferin_decay_constant=0.2,
luciferin_enhancement_constant=0.2,
step_size=0.01,
initial_decision_radius=0.2,
max_sensory_radius=2,
desired_num_neighbors=10,
decision_radius_adjustment_constant=0.05
)
)
class GlowWormSwarmOptimizer(UtilityFunctionOptimizer):
""" Searches the utility function for maxima using glowworms.
The first part of this has a good description:
https://www.hindawi.com/journals/mpe/2016/5481602/
The main benefits are:
1. It doesn't require a gradient
2. It is well parallelizable (batchable)
The main drawback is that it queries the utility function many times, and that's somewhat slow, but it would be
cheap to optimize.
"""
def __init__(
self,
optimizer_config: Point,
optimization_problem: OptimizationProblem,
utility_function: UtilityFunction,
logger=None
):
UtilityFunctionOptimizer.__init__(self, optimizer_config, optimization_problem, utility_function, logger)
self.parameter_adapter = DiscreteToUnitContinuousHypergridAdapter(
adaptee=self.optimization_problem.parameter_space
)
self.dimension_names = [dimension.name for dimension in self.parameter_adapter.dimensions]
@trace()
def suggest(self, context_values_dataframe=None): # pylint: disable=unused-argument
""" Returns the next best configuration to try.
The idea is pretty simple:
1. We start with a random population of glowworms, whose luciferin levels are equal to their utility function value.
2. Each glowworm looks around for all other glowworms in its neighborhood and finds ones that are brighter.
3. Each glowworm randomly selects from its brighter neighbors the one to walk towards (with probability proportional to the diff in brightness).
4. Everybody takes a step.
5. Everybody updates step size to have the desired number of neighbors.
6. Update luciferin levels.
"""
# TODO: consider remembering great features from previous invocations of the suggest() method.
feature_values_dataframe = self.optimization_problem.parameter_space.random_dataframe(
num_samples=self.optimizer_config.num_worms * self.optimizer_config.num_initial_points_multiplier
)
utility_function_values = self.utility_function(feature_values_pandas_frame=feature_values_dataframe.copy(deep=False))
num_utility_function_values = len(utility_function_values.index)
if num_utility_function_values == 0:
config_to_suggest = Point.from_dataframe(feature_values_dataframe.iloc[[0]])
self.logger.debug(f"Suggesting: {str(config_to_suggest)} at random.")
return config_to_suggest
# TODO: keep getting configs until we have enough utility values to get started. Or assign 0 to missing ones,
# and let them climb out of their infeasible holes.
top_utility_values = utility_function_values.nlargest(n=self.optimizer_config.num_worms, columns=['utility'])
# TODO: could it be in place?
features_for_top_utility = self.parameter_adapter.project_dataframe(feature_values_dataframe.loc[top_utility_values.index], in_place=False)
worms = | pd.concat([features_for_top_utility, top_utility_values], axis=1) | pandas.concat |
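The record stops right after the initial worm population is assembled. To make the movement rule from the docstring concrete (steps 2-4), here is a compact toy sketch on a 2-D utility surface; it illustrates the idea only and is not the class's actual implementation:

```python
import numpy as np


def glowworm_step(positions, luciferin, step_size=0.03, radius=0.5, rng=None):
    """One movement step: each worm picks a brighter neighbour within `radius`
    (probability proportional to the luciferin difference) and moves toward it."""
    rng = rng or np.random.default_rng(0)
    new_positions = positions.copy()
    for i, (pos, lum) in enumerate(zip(positions, luciferin)):
        dists = np.linalg.norm(positions - pos, axis=1)
        brighter = np.where((dists < radius) & (luciferin > lum))[0]
        if brighter.size == 0:
            continue  # this worm is a local leader; it stays put
        probs = luciferin[brighter] - lum
        target = rng.choice(brighter, p=probs / probs.sum())
        direction = positions[target] - pos
        new_positions[i] = pos + step_size * direction / np.linalg.norm(direction)
    return new_positions


utility = lambda p: -np.sum((p - 0.5) ** 2, axis=1)   # toy surface, peak at (0.5, 0.5)
rng = np.random.default_rng(0)
worms = rng.uniform(0, 1, size=(50, 2))
print("mean utility before:", utility(worms).mean())
for _ in range(20):
    worms = glowworm_step(worms, utility(worms), rng=rng)
print("mean utility after: ", utility(worms).mean())  # increases as worms cluster near the peak
```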
from os.path import exists
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from k_choice.graphical.two_choice.graphs.hypercube import HyperCube
from k_choice.graphical.two_choice.graphs.random_regular_graph import RandomRegularGraph
from k_choice.graphical.two_choice.strategies.greedy_strategy import GreedyStrategy
from k_choice.graphical.two_choice.environment import run_strategy
from k_choice.graphical.two_choice.full_knowledge.RL.DQN.constants import MAX_LOAD_REWARD
N = 32
M = 32
RUNS_PER_D = 1000
def analyse_random_regular(n=N, m=M, runs_per_d=RUNS_PER_D):
vals = []
for d in range(1, n):
for run in range(runs_per_d):
if d < n / 2:
graph = RandomRegularGraph(n=n, d=d)
else:
graph_transposed = RandomRegularGraph(n=n, d=n - 1 - d)
graph = graph_transposed.transpose()
score = run_strategy(graph=graph, m=m, strategy=GreedyStrategy(graph=graph, m=m), reward_fun=MAX_LOAD_REWARD,
print_behaviour=False)
maxload = -score
vals.append([d, maxload])
df = pd.DataFrame(data=vals, columns=["d", "score"])
output_path = f'data/{n}_{m}_random_regular_greedy_analysis.csv'
df.to_csv(output_path, mode='a', index=False, header=not exists(output_path))
def create_plot():
df = | pd.read_csv("data/32_32_random_regular_greedy_analysis.csv") | pandas.read_csv |
# Preppin' Data 2021 Week 42
import pandas as pd
import numpy as np
# Input the data
df = pd.read_csv('unprepped_data\\PD 2021 Wk 42 Input.csv')
# Create new rows for any date missing between the first and last date in the data set provided
# build a data frame of all dates from min to max
min_date = min(df['Date'])
max_date = max(df['Date'])
idx = | pd.date_range(min_date, max_date) | pandas.date_range |
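The snippet ends after building the full date range; one common way to finish the step (a sketch, not necessarily the published solution) is to reindex the frame onto that range so every missing date appears as a NaN row:

```python
import pandas as pd

# Toy frame with a gap on 2021-01-03 (stand-in for the Preppin' Data input)
df = pd.DataFrame({"Date": ["2021-01-01", "2021-01-02", "2021-01-04"],
                   "Value": [10, 12, 15]})
df["Date"] = pd.to_datetime(df["Date"])

idx = pd.date_range(df["Date"].min(), df["Date"].max())
df = (df.set_index("Date")
        .reindex(idx)            # inserts a NaN row for every missing date
        .rename_axis("Date")
        .reset_index())
print(df)
```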
import pandas as pd
from sklearn import preprocessing
from scipy.sparse import coo_matrix
import numpy as np
def quora_leaky_extracting(concat):
tid1 = concat['q1_id'].values
tid2 = concat['q2_id'].values
doc_number = np.max((tid1.max(), tid2.max())) + 1
adj = coo_matrix((np.ones(len(tid1) * 2), (np.concatenate(
[tid1, tid2]), np.concatenate([tid2, tid1]))), (doc_number, doc_number))
degree = adj.sum(axis=0)
concat['q1_id_degree'] = concat['q1_id'].apply(lambda x: degree[0, x])
concat['q2_id_degree'] = concat['q2_id'].apply(lambda x: degree[0, x])
tmp = adj * adj
concat['path'] = concat.apply(
lambda row: tmp[int(row['q1_id']), int(row['q2_id'])], axis=1)
return concat
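A tiny toy run of `quora_leaky_extracting` (made-up question IDs, just to show what the added columns mean):

```python
import pandas as pd

toy = pd.DataFrame({
    "q1_id": [0, 0, 1],
    "q2_id": [1, 2, 2],
})
out = quora_leaky_extracting(toy.copy())
print(out[["q1_id_degree", "q2_id_degree", "path"]])
# q1_id_degree / q2_id_degree: how often each question appears in any pair;
# path: number of questions paired with both q1 and q2 (2-hop co-occurrence).
```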
def load_quora(path='./quora'):
print('---------- Loading QuoraQP ----------')
tr = pd.read_csv(path + '/train.tsv', delimiter='\t', header=None)
tr.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
val = pd.read_csv(path + '/dev.tsv', delimiter='\t', header=None)
val.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
te = pd.read_csv(path + '/test.tsv', delimiter='\t', header=None)
te.columns = ['is_duplicate', 'question1', 'question2', 'pair_id']
data = pd.concat([tr, val, te]).fillna('')
questions = list(data['question1'].values) + list(data['question2'].values)
le = preprocessing.LabelEncoder()
le.fit(questions)
data['q1_id'] = le.transform(data['question1'].values)
data['q2_id'] = le.transform(data['question2'].values)
data = quora_leaky_extracting(data)
label = data["is_duplicate"].to_numpy()
s1_freq = data["q1_id_degree"].to_numpy()
s2_freq = data["q2_id_degree"].to_numpy()
s1s2_inter = data["path"].to_numpy()
X = pd.DataFrame({
"s1_freq": s1_freq,
"s2_freq": s2_freq,
"s1s2_inter": s1s2_inter
})
Y = label
print('Success!')
return X, Y
def load_artificial_dataset(path='./artificial_dataset'):
print('---------- Loading artificial dataset ----------')
tr = pd.read_csv(path + '/train.tsv', delimiter='\t', header=None)
tr.columns = ['is_duplicate', 'question1', 'question2']
val = pd.read_csv(path + '/dev.tsv', delimiter='\t', header=None)
val.columns = ['is_duplicate', 'question1', 'question2']
te = | pd.read_csv(path + '/test.tsv', delimiter='\t', header=None) | pandas.read_csv |
""" test parquet compat """
import datetime
from distutils.version import LooseVersion
import os
from warnings import catch_warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
from pandas.io.parquet import (
FastParquetImpl,
PyArrowImpl,
get_engine,
read_parquet,
to_parquet,
)
try:
import pyarrow # noqa
_HAVE_PYARROW = True
except ImportError:
_HAVE_PYARROW = False
try:
import fastparquet # noqa
_HAVE_FASTPARQUET = True
except ImportError:
_HAVE_FASTPARQUET = False
pytestmark = pytest.mark.filterwarnings(
"ignore:RangeIndex.* is deprecated:DeprecationWarning"
)
# setup engines & skips
@pytest.fixture(
params=[
pytest.param(
"fastparquet",
marks=pytest.mark.skipif(
not _HAVE_FASTPARQUET, reason="fastparquet is not installed"
),
),
pytest.param(
"pyarrow",
marks=pytest.mark.skipif(
not _HAVE_PYARROW, reason="pyarrow is not installed"
),
),
]
)
def engine(request):
return request.param
@pytest.fixture
def pa():
if not _HAVE_PYARROW:
pytest.skip("pyarrow is not installed")
return "pyarrow"
@pytest.fixture
def fp():
if not _HAVE_FASTPARQUET:
pytest.skip("fastparquet is not installed")
return "fastparquet"
@pytest.fixture
def df_compat():
return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})
@pytest.fixture
def df_cross_compat():
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
# 'c': np.arange(3, 6).astype('u1'),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("20130101", periods=3),
# 'g': pd.date_range('20130101', periods=3,
# tz='US/Eastern'),
# 'h': pd.date_range('20130101', periods=3, freq='ns')
}
)
return df
@pytest.fixture
def df_full():
return pd.DataFrame(
{
"string": list("abc"),
"string_with_nan": ["a", np.nan, "c"],
"string_with_none": ["a", None, "c"],
"bytes": [b"foo", b"bar", b"baz"],
"unicode": ["foo", "bar", "baz"],
"int": list(range(1, 4)),
"uint": np.arange(3, 6).astype("u1"),
"float": np.arange(4.0, 7.0, dtype="float64"),
"float_with_nan": [2.0, np.nan, 3.0],
"bool": [True, False, True],
"datetime": pd.date_range("20130101", periods=3),
"datetime_with_nat": [
pd.Timestamp("20130101"),
pd.NaT,
pd.Timestamp("20130103"),
],
}
)
def check_round_trip(
df,
engine=None,
path=None,
write_kwargs=None,
read_kwargs=None,
expected=None,
check_names=True,
check_like=False,
repeat=2,
):
"""Verify parquet serializer and deserializer produce the same results.
Performs a pandas to disk and disk to pandas round trip,
then compares the 2 resulting DataFrames to verify equality.
Parameters
----------
df: Dataframe
engine: str, optional
'pyarrow' or 'fastparquet'
path: str, optional
write_kwargs: dict of str:str, optional
read_kwargs: dict of str:str, optional
expected: DataFrame, optional
Expected deserialization result, otherwise will be equal to `df`
check_names: list of str, optional
Closed set of column names to be compared
check_like: bool, optional
If True, ignore the order of index & columns.
repeat: int, optional
How many times to repeat the test
"""
write_kwargs = write_kwargs or {"compression": None}
read_kwargs = read_kwargs or {}
if expected is None:
expected = df
if engine:
write_kwargs["engine"] = engine
read_kwargs["engine"] = engine
def compare(repeat):
for _ in range(repeat):
df.to_parquet(path, **write_kwargs)
with catch_warnings(record=True):
actual = read_parquet(path, **read_kwargs)
tm.assert_frame_equal(
expected, actual, check_names=check_names, check_like=check_like
)
if path is None:
with tm.ensure_clean() as path:
compare(repeat)
else:
compare(repeat)
def test_invalid_engine(df_compat):
with pytest.raises(ValueError):
check_round_trip(df_compat, "foo", "bar")
def test_options_py(df_compat, pa):
# use the set option
with pd.option_context("io.parquet.engine", "pyarrow"):
check_round_trip(df_compat)
def test_options_fp(df_compat, fp):
# use the set option
with pd.option_context("io.parquet.engine", "fastparquet"):
check_round_trip(df_compat)
def test_options_auto(df_compat, fp, pa):
# use the set option
with pd.option_context("io.parquet.engine", "auto"):
check_round_trip(df_compat)
def test_options_get_engine(fp, pa):
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "pyarrow"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "fastparquet"):
assert isinstance(get_engine("auto"), FastParquetImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
with pd.option_context("io.parquet.engine", "auto"):
assert isinstance(get_engine("auto"), PyArrowImpl)
assert isinstance(get_engine("pyarrow"), PyArrowImpl)
assert isinstance(get_engine("fastparquet"), FastParquetImpl)
def test_get_engine_auto_error_message():
# Expect different error messages from get_engine(engine="auto")
# if engines aren't installed vs. are installed but bad version
from pandas.compat._optional import VERSIONS
# Do we have engines installed, but a bad version of them?
pa_min_ver = VERSIONS.get("pyarrow")
fp_min_ver = VERSIONS.get("fastparquet")
have_pa_bad_version = (
False
if not _HAVE_PYARROW
else LooseVersion(pyarrow.__version__) < LooseVersion(pa_min_ver)
)
have_fp_bad_version = (
False
if not _HAVE_FASTPARQUET
else LooseVersion(fastparquet.__version__) < LooseVersion(fp_min_ver)
)
# Do we have usable engines installed?
have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version
if not have_usable_pa and not have_usable_fp:
# No usable engines found.
if have_pa_bad_version:
match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .pyarrow."
with pytest.raises(ImportError, match=match):
get_engine("auto")
if have_fp_bad_version:
match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
else:
match = "Missing optional dependency .fastparquet."
with pytest.raises(ImportError, match=match):
get_engine("auto")
def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=pa, compression=None)
result = read_parquet(path, engine=fp)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=fp, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
# cross-compat with differing reading/writing engines
if (
LooseVersion(pyarrow.__version__) < "0.15"
and LooseVersion(pyarrow.__version__) >= "0.13"
):
pytest.xfail(
"Reading fastparquet with pyarrow in 0.14 fails: "
"https://issues.apache.org/jira/browse/ARROW-6492"
)
df = df_cross_compat
with tm.ensure_clean() as path:
df.to_parquet(path, engine=fp, compression=None)
with catch_warnings(record=True):
result = read_parquet(path, engine=pa)
tm.assert_frame_equal(result, df)
result = read_parquet(path, engine=pa, columns=["a", "d"])
tm.assert_frame_equal(result, df[["a", "d"]])
class Base:
def check_error_on_write(self, df, engine, exc):
# check that we are raising the exception on writing
with tm.ensure_clean() as path:
with pytest.raises(exc):
to_parquet(df, path, engine, compression=None)
class TestBasic(Base):
def test_error(self, engine):
for obj in [
pd.Series([1, 2, 3]),
1,
"foo",
pd.Timestamp("20130101"),
np.array([1, 2, 3]),
]:
self.check_error_on_write(obj, engine, ValueError)
def test_columns_dtypes(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# unicode
df.columns = ["foo", "bar"]
check_round_trip(df, engine)
def test_columns_dtypes_invalid(self, engine):
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
# numeric
df.columns = [0, 1]
self.check_error_on_write(df, engine, ValueError)
# bytes
df.columns = [b"foo", b"bar"]
self.check_error_on_write(df, engine, ValueError)
# python object
df.columns = [
datetime.datetime(2011, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 1, 1),
]
self.check_error_on_write(df, engine, ValueError)
@pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
def test_compression(self, engine, compression):
if compression == "snappy":
pytest.importorskip("snappy")
elif compression == "brotli":
pytest.importorskip("brotli")
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine, write_kwargs={"compression": compression})
def test_read_columns(self, engine):
# GH18154
df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})
expected = pd.DataFrame({"string": list("abc")})
check_round_trip(
df, engine, expected=expected, read_kwargs={"columns": ["string"]}
)
def test_write_index(self, engine):
check_names = engine != "fastparquet"
df = pd.DataFrame({"A": [1, 2, 3]})
check_round_trip(df, engine)
indexes = [
[2, 3, 4],
pd.date_range("20130101", periods=3),
list("abc"),
[1, 3, 4],
]
# non-default index
for index in indexes:
df.index = index
if isinstance(index, pd.DatetimeIndex):
df.index = df.index._with_freq(None) # freq doesnt round-trip
check_round_trip(df, engine, check_names=check_names)
# index with meta-data
df.index = [0, 1, 2]
df.index.name = "foo"
check_round_trip(df, engine)
def test_write_multiindex(self, pa):
# Not supported in fastparquet as of 0.1.3 or older pyarrow version
engine = pa
df = pd.DataFrame({"A": [1, 2, 3]})
index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df.index = index
check_round_trip(df, engine)
def test_write_column_multiindex(self, engine):
# column multi-index
mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns)
self.check_error_on_write(df, engine, ValueError)
def test_multiindex_with_columns(self, pa):
engine = pa
dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
df = pd.DataFrame(np.random.randn(2 * len(dates), 3), columns=list("ABC"))
index1 = pd.MultiIndex.from_product(
[["Level1", "Level2"], dates], names=["level", "date"]
)
index2 = index1.copy(names=None)
for index in [index1, index2]:
df.index = index
check_round_trip(df, engine)
check_round_trip(
df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
)
def test_write_ignoring_index(self, engine):
# ENH 20768
# Ensure index=False omits the index from the written Parquet file.
df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})
write_kwargs = {"compression": None, "index": False}
# Because we're dropping the index, we expect the loaded dataframe to
# have the default integer index.
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore custom index
df = pd.DataFrame(
{"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"]
)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
# Ignore multi-indexes as well.
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = pd.DataFrame(
{"one": list(range(8)), "two": [-i for i in range(8)]}, index=arrays
)
expected = df.reset_index(drop=True)
check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)
class TestParquetPyArrow(Base):
def test_basic(self, pa, df_full):
df = df_full
# additional supported types for pyarrow
dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
dti = dti._with_freq(None) # freq doesnt round-trip
df["datetime_tz"] = dti
df["bool_with_none"] = [True, None, True]
check_round_trip(df, pa)
def test_basic_subset_columns(self, pa, df_full):
# GH18628
df = df_full
# additional supported types for pyarrow
df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
check_round_trip(
df,
pa,
expected=df[["string", "int"]],
read_kwargs={"columns": ["string", "int"]},
)
def test_duplicate_columns(self, pa):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, pa, ValueError)
def test_unsupported(self, pa):
if LooseVersion(pyarrow.__version__) < LooseVersion("0.15.1.dev"):
# period - will be supported using an extension type with pyarrow 1.0
df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_error_on_write(df, pa, Exception)
# timedelta
df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
self.check_error_on_write(df, pa, NotImplementedError)
# mixed python objects
df = pd.DataFrame({"a": ["a", 1, 2.0]})
# pyarrow 0.11 raises ArrowTypeError
# older pyarrows raise ArrowInvalid
self.check_error_on_write(df, pa, Exception)
def test_categorical(self, pa):
# supported in >= 0.7.0
df = pd.DataFrame()
df["a"] = pd.Categorical(list("abcdef"))
# test for null, out-of-order values, and unobserved category
df["b"] = pd.Categorical(
["bar", "foo", "foo", "bar", None, "bar"],
dtype=pd.CategoricalDtype(["foo", "bar", "baz"]),
)
# test for ordered flag
df["c"] = pd.Categorical(
["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True
)
if LooseVersion(pyarrow.__version__) >= LooseVersion("0.15.0"):
check_round_trip(df, pa)
else:
# de-serialized as object for pyarrow < 0.15
expected = df.astype(object)
check_round_trip(df, pa, expected=expected)
def test_s3_roundtrip(self, df_compat, s3_resource, pa):
# GH #19134
check_round_trip(df_compat, pa, path="s3://pandas-test/pyarrow.parquet")
@td.skip_if_no("s3fs")
@pytest.mark.parametrize("partition_col", [["A"], []])
def test_s3_roundtrip_for_dir(self, df_compat, s3_resource, pa, partition_col):
from pandas.io.s3 import get_fs as get_s3_fs
# GH #26388
# https://github.com/apache/arrow/blob/master/python/pyarrow/tests/test_parquet.py#L2716
# As per pyarrow partitioned columns become 'categorical' dtypes
# and are added to back of dataframe on read
expected_df = df_compat.copy()
if partition_col:
expected_df[partition_col] = expected_df[partition_col].astype("category")
check_round_trip(
df_compat,
pa,
expected=expected_df,
path="s3://pandas-test/parquet_dir",
write_kwargs={
"partition_cols": partition_col,
"compression": None,
"filesystem": get_s3_fs(),
},
check_like=True,
repeat=1,
)
def test_partition_cols_supported(self, pa, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, partition_cols=partition_cols, compression=None)
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == 2
assert dataset.partitions.partition_names == set(partition_cols)
def test_partition_cols_string(self, pa, df_full):
# GH #27117
partition_cols = "bool"
partition_cols_list = [partition_cols]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(path, partition_cols=partition_cols, compression=None)
import pyarrow.parquet as pq
dataset = pq.ParquetDataset(path, validate_schema=False)
assert len(dataset.partitions.partition_names) == 1
assert dataset.partitions.partition_names == set(partition_cols_list)
def test_empty_dataframe(self, pa):
# GH #27339
df = pd.DataFrame()
check_round_trip(df, pa)
def test_write_with_schema(self, pa):
import pyarrow
df = pd.DataFrame({"x": [0, 1]})
schema = pyarrow.schema([pyarrow.field("x", type=pyarrow.bool_())])
out_df = df.astype(bool)
check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df)
@td.skip_if_no("pyarrow", min_version="0.15.0")
def test_additional_extension_arrays(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol
df = pd.DataFrame(
{
"a": pd.Series([1, 2, 3], dtype="Int64"),
"b": pd.Series([1, 2, 3], dtype="UInt32"),
"c": pd.Series(["a", None, "c"], dtype="string"),
}
)
if LooseVersion(pyarrow.__version__) >= LooseVersion("0.16.0"):
expected = df
else:
# de-serialized as plain int / object
expected = df.assign(
a=df.a.astype("int64"), b=df.b.astype("int64"), c=df.c.astype("object")
)
check_round_trip(df, pa, expected=expected)
df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")})
if LooseVersion(pyarrow.__version__) >= LooseVersion("0.16.0"):
expected = df
else:
# if missing values in integer, currently de-serialized as float
expected = df.assign(a=df.a.astype("float64"))
check_round_trip(df, pa, expected=expected)
@td.skip_if_no("pyarrow", min_version="0.16.0")
def test_additional_extension_types(self, pa):
# test additional ExtensionArrays that are supported through the
# __arrow_array__ protocol + by defining a custom ExtensionType
df = pd.DataFrame(
{
# Arrow does not yet support struct in writing to Parquet (ARROW-1644)
# "c": pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), (3, 4)]),
"d": pd.period_range("2012-01-01", periods=3, freq="D"),
}
)
check_round_trip(df, pa)
@td.skip_if_no("pyarrow", min_version="0.14")
def test_timestamp_nanoseconds(self, pa):
# with version 2.0, pyarrow defaults to writing the nanoseconds, so
# this should work without error
df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1n", periods=10)})
check_round_trip(df, pa, write_kwargs={"version": "2.0"})
class TestParquetFastParquet(Base):
@td.skip_if_no("fastparquet", min_version="0.3.2")
def test_basic(self, fp, df_full):
df = df_full
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
dti = dti._with_freq(None) # freq doesnt round-trip
df["datetime_tz"] = dti
df["timedelta"] = pd.timedelta_range("1 day", periods=3)
check_round_trip(df, fp)
@pytest.mark.skip(reason="not supported")
def test_duplicate_columns(self, fp):
# not currently able to handle duplicate columns
df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
self.check_error_on_write(df, fp, ValueError)
def test_bool_with_none(self, fp):
df = pd.DataFrame({"a": [True, None, False]})
expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
check_round_trip(df, fp, expected=expected)
def test_unsupported(self, fp):
# period
df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
self.check_error_on_write(df, fp, ValueError)
# mixed
df = pd.DataFrame({"a": ["a", 1, 2.0]})
self.check_error_on_write(df, fp, ValueError)
def test_categorical(self, fp):
df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
check_round_trip(df, fp)
def test_filter_row_groups(self, fp):
d = {"a": list(range(0, 3))}
df = pd.DataFrame(d)
with tm.ensure_clean() as path:
df.to_parquet(path, fp, compression=None, row_group_offsets=1)
result = read_parquet(path, fp, filters=[("a", "==", 0)])
assert len(result) == 1
def test_s3_roundtrip(self, df_compat, s3_resource, fp):
# GH #19134
check_round_trip(df_compat, fp, path="s3://pandas-test/fastparquet.parquet")
def test_partition_cols_supported(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(path)
import fastparquet # noqa: F811
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_partition_cols_string(self, fp, df_full):
# GH #27117
partition_cols = "bool"
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
partition_cols=partition_cols,
compression=None,
)
assert os.path.exists(path)
import fastparquet # noqa: F811
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 1
def test_partition_on_supported(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with tm.ensure_clean_dir() as path:
df.to_parquet(
path,
engine="fastparquet",
compression=None,
partition_on=partition_cols,
)
assert os.path.exists(path)
import fastparquet # noqa: F811
actual_partition_cols = fastparquet.ParquetFile(path, False).cats
assert len(actual_partition_cols) == 2
def test_error_on_using_partition_cols_and_partition_on(self, fp, df_full):
# GH #23283
partition_cols = ["bool", "int"]
df = df_full
with pytest.raises(ValueError):
with | tm.ensure_clean_dir() | pandas._testing.ensure_clean_dir |
from qfengine.data.price.price_handler import PriceHandler
from qfengine.asset.universe.static import StaticUniverse
import functools
from qfengine import settings
import numpy as np
import pandas as pd
import pytz
from typing import List
class BacktestPriceHandler(PriceHandler):
def __init__(
self,
price_data_sources:List,
universe = None,
**kwargs
):
super().__init__(price_data_sources = price_data_sources,
universe = universe,
**kwargs
)
self._assets_bid_ask_frames = {}
if self.universe is None:
self.universe = StaticUniverse(self.assetsList(**kwargs))
if settings.PRINT_EVENTS:
print(
'PriceHandler Defaulted Universe: %s' %self.universe
)
if 'preload_bid_ask_data' in kwargs:
if kwargs['preload_bid_ask_data']:
if settings.PRINT_EVENTS:
print("Preloading bid_ask data of assets in universe")
# fetch each asset's bid/ask frame now so it is cached before the backtest starts
for s in self.universe.get_assets():
    self._get_bid_ask_df(s)
def assetsDF(self, **kwargs):
df = pd.DataFrame()
for ds in self.price_data_sources:
new_df = ds.assetsDF(**kwargs)
df = df.append(new_df.reindex([i for i in new_df.index if i not in df.index]))
if self.universe:
df = df.reindex([i for i in df.index if i in self.universe.get_assets()])
return df
def assetsList(self, **kwargs):
return list(self.assetsDF(**kwargs).index.values)
def sectorsList(self):
result = []
for ds in self.price_data_sources:
result = result + [s for s in ds.sectorsList if s not in result]
return result
def get_assets_earliest_available_dt(self, asset_symbols:List[str]):
dt_range_df = self._get_dt_range_df(asset_symbols)
return self._format_dt(max(dt_range_df.start_dt.values))
def get_assets_latest_available_dt(self, asset_symbols:List[str]):
dt_range_df = self._get_dt_range_df(asset_symbols)
return self._format_dt(min(dt_range_df.end_dt.values))
#!---| Bid & Ask Functions |---!#
@functools.lru_cache(maxsize = 1024 * 1024)
def get_asset_latest_bid_price(self, dt, asset_symbol):
# TODO: Check for asset in Universe
bid_ask_df = self._get_bid_ask_df(asset_symbol)
bid = np.NaN
try:
bid = bid_ask_df.iloc[bid_ask_df.index.get_loc(dt, method='pad')]['bid']
except KeyError: # Before start date
pass
return bid
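    # Editor's note (illustrative, not part of the original class): the lookup above
    # relies on positional padding, i.e. taking the most recent row at or before `dt`.
    # On a bid/ask frame indexed by timestamp this is roughly equivalent to
    # `bid_ask_df['bid'].asof(dt)`, which may read more clearly than
    # `Index.get_loc(..., method='pad')`.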
@functools.lru_cache(maxsize = 1024 * 1024)
def get_asset_latest_ask_price(self, dt, asset_symbol):
"""
"""
# TODO: Check for asset in Universe
bid_ask_df = self._get_bid_ask_df(asset_symbol)
ask = np.NaN
try:
ask = bid_ask_df.iloc[bid_ask_df.index.get_loc(dt, method='pad')]['ask']
except KeyError: # Before start date
pass
return ask
def get_asset_latest_bid_ask_price(self, dt, asset_symbol):
"""
"""
# TODO: For the moment this is sufficient for OHLCV
# data, which only usually provides mid prices
# This will need to be revisited when handling intraday
# bid/ask time series.
# It has been added as an optimisation mechanism for
# interday backtests.
bid = self.get_asset_latest_bid_price(dt, asset_symbol)
return (bid, bid)
def get_asset_latest_mid_price(self, dt, asset_symbol):
"""
"""
bid_ask = self.get_asset_latest_bid_ask_price(dt, asset_symbol)
try:
mid = (bid_ask[0] + bid_ask[1]) / 2.0
except Exception:
# TODO: Log this
mid = np.NaN
return mid
#!---| Daily Price (OHLCV) Functions |---!#
def get_assets_historical_closes(self,
asset_symbols:List[str],
start_dt = None,
end_dt = None,
adjusted=False
):
"""
"""
prices_df = None
for ds in self.price_data_sources:
try:
prices_df = ds.get_assets_historical_price_dfs(
*asset_symbols,
start_dt=start_dt,
end_dt = end_dt,
price = 'close',
adjusted=adjusted
).sort_index()
if not prices_df.empty:
break
except Exception:
raise
if prices_df is None:
return pd.DataFrame(columns = asset_symbols)
else:
assert len(asset_symbols) == prices_df.shape[1]
if start_dt is not None:
prices_df = prices_df[prices_df.index >= self._format_dt(start_dt)]
if end_dt is not None:
prices_df = prices_df[prices_df.index <= self._format_dt(end_dt)]
market_close_dt = self._convert_dt_to_date(end_dt) + pd.Timedelta(hours=16, minutes=00)
if self._format_dt(end_dt) < market_close_dt: #---| rid of last index if market is not close yet
prices_df = prices_df.iloc[:-1]
return prices_df
def get_assets_historical_opens(self,
asset_symbols:List[str],
start_dt = None,
end_dt = None,
adjusted=False
):
"""
"""
prices_df = None
for ds in self.price_data_sources:
try:
prices_df = ds.get_assets_historical_price_dfs(
*asset_symbols,
start_dt = start_dt,
end_dt = end_dt,
price = 'open',
adjusted=adjusted
).sort_index()
if not prices_df.empty:
break
except Exception:
raise
if prices_df is None:
return pd.DataFrame(columns = asset_symbols)
else:
assert len(asset_symbols) == prices_df.shape[1]
if start_dt is not None:
prices_df = prices_df[prices_df.index >= self._format_dt(start_dt)]
if end_dt is not None:
prices_df = prices_df[prices_df.index <= self._format_dt(end_dt)]
market_open_dt = self._convert_dt_to_date(end_dt) + pd.Timedelta(hours=9, minutes=30)
if self._format_dt(end_dt) < market_open_dt:
prices_df = prices_df.iloc[:-1]
return prices_df
def get_assets_historical_highs(self,
asset_symbols:List[str],
start_dt = None,
end_dt = None,
adjusted=False
):
"""
"""
prices_df = None
for ds in self.price_data_sources:
try:
prices_df = ds.get_assets_historical_price_dfs(
*asset_symbols,
start_dt=start_dt,
end_dt = end_dt,
price = 'high',
adjusted=adjusted
).sort_index()
if not prices_df.empty:
break
except Exception:
raise
if prices_df is None:
return pd.DataFrame(columns = asset_symbols)
else:
assert len(asset_symbols) == prices_df.shape[1]
if start_dt is not None:
prices_df = prices_df[prices_df.index >= self._format_dt(start_dt)]
if end_dt is not None:
prices_df = prices_df[prices_df.index <= self._format_dt(end_dt)]
market_close_dt = self._convert_dt_to_date(end_dt) + pd.Timedelta(hours=16, minutes=00)
if self._format_dt(end_dt) < market_close_dt: #---| rid of last index if market is not close yet
prices_df = prices_df.iloc[:-1]
return prices_df
def get_assets_historical_lows(self,
asset_symbols:List[str],
start_dt = None,
end_dt = None,
adjusted=False
):
"""
"""
prices_df = None
for ds in self.price_data_sources:
try:
prices_df = ds.get_assets_historical_price_dfs(
*asset_symbols,
start_dt=start_dt,
end_dt = end_dt,
price = 'low',
adjusted=adjusted
).sort_index()
if not prices_df.empty:
break
except Exception:
raise
if prices_df is None:
return pd.DataFrame(columns = asset_symbols)
else:
assert len(asset_symbols) == prices_df.shape[1]
if start_dt is not None:
prices_df = prices_df[prices_df.index >= self._format_dt(start_dt)]
if end_dt is not None:
prices_df = prices_df[prices_df.index <= self._format_dt(end_dt)]
market_close_dt = self._convert_dt_to_date(end_dt) + pd.Timedelta(hours=16, minutes=00)
if self._format_dt(end_dt) < market_close_dt: #---| rid of last index if market is not close yet
prices_df = prices_df.iloc[:-1]
return prices_df
def get_assets_historical_volumes(self,
asset_symbols:List[str],
start_dt = None,
end_dt = None,
adjusted=False
):
"""
"""
prices_df = None
for ds in self.price_data_sources:
try:
prices_df = ds.get_assets_historical_price_dfs(
*asset_symbols,
start_dt=start_dt,
end_dt = end_dt,
price = 'volume',
adjusted=adjusted
).sort_index()
if not prices_df.empty:
break
except Exception:
raise
if prices_df is None:
return pd.DataFrame(columns = asset_symbols)
else:
assert len(asset_symbols) == prices_df.shape[1]
if start_dt is not None:
prices_df = prices_df[prices_df.index >= self._format_dt(start_dt)]
if end_dt is not None:
prices_df = prices_df[prices_df.index <= self._format_dt(end_dt)]
market_close_dt = self._convert_dt_to_date(end_dt) + pd.Timedelta(hours=16, minutes=00)
if self._format_dt(end_dt) < market_close_dt: #---| rid of last index if market is not close yet
prices_df = prices_df.iloc[:-1]
return prices_df
#!---| BACKEND FUNCS
def _reset_cached_frames(self):
self._assets_bid_ask_frames = {}
def _convert_dt_to_date(self, dt):
return pd.Timestamp(
self._format_dt(dt).date(),
tz = settings.TIMEZONE
)
def _format_dt(self, dt):
try:
return pd.Timestamp(dt).tz_convert(settings.TIMEZONE)
except TypeError:
try:
return pd.Timestamp(dt).tz_localize(settings.TIMEZONE)
except:
raise
def _get_bid_ask_df(self, asset_symbol):
if asset_symbol not in self._assets_bid_ask_frames:
for ds in self.price_data_sources:
try:
self._assets_bid_ask_frames[
asset_symbol
] = ds.get_assets_bid_ask_dfs(
asset_symbol
)[asset_symbol]
break
except:
pass
assert asset_symbol in self._assets_bid_ask_frames
return self._assets_bid_ask_frames[asset_symbol]
def _get_dt_range_df(self, asset_symbols:List[str]):
symbols = asset_symbols
result_df = | pd.DataFrame() | pandas.DataFrame |
import geopandas
import pandas as pd
import math
def build_ncov_geodf(day_df):
world_lines = geopandas.read_file('zip://./shapefiles/ne_50m_admin_0_countries.zip')
world = world_lines[(world_lines['POP_EST'] > 0) & (world_lines['ADMIN'] != 'Antarctica')]
world = world.rename(columns={'ADMIN': 'name'})
china = world_lines[world_lines['ADMIN'] == 'China']
# layers: ['gadm36_CHN_0', 'gadm36_CHN_1', 'gadm36_CHN_2', 'gadm36_CHN_3']
china_provinces = geopandas.read_file('./shapefiles/gadm36_CHN.gpkg', layer='gadm36_CHN_1')
china_provinces = china_provinces.rename(columns={'NAME_1': 'name'})
china_cities = geopandas.read_file('./shapefiles/gadm36_CHN.gpkg', layer='gadm36_CHN_2')
china_cities = china_cities.rename(columns={'NAME_2': 'name'})
# set to same projection
china_provinces.crs = china.crs
china_cities.crs = china.crs
state_lines = geopandas.read_file('zip://./shapefiles/ne_50m_admin_1_states_provinces.zip')
us_state_lines = state_lines[state_lines['iso_a2'].isin(['US','CA','AU'])]
# merge with coronavirus data
us_state_ncov = us_state_lines.merge(day_df, left_on='name', right_on='Province/State')
# merge with coronavirus data
china_provinces_ncov = china_provinces.merge(day_df, left_on='name', right_on='Province/State')
china_cities_ncov = china_cities.merge(day_df, left_on='name', right_on='Province/State')
    # add Hong Kong data to Guangdong province data
g_idx = china_provinces['name'] == 'Guangdong'
hk_idx = day_df['Province/State'] == 'Hong Kong'
if g_idx.any() and hk_idx.any():
hk_confirmed = day_df.loc[hk_idx, 'Confirmed'].values[0]
china_provinces_ncov.loc[g_idx, 'Confirmed'] += hk_confirmed
# deselect countries we already dealt with
rest_of_world = world[~world['name'].isin(['China','United States of America','Australia','Canada'])]
# merge with coronavirus data
world_ncov = rest_of_world.merge(day_df, left_on='name', right_on='Country/Region')
cols = ['name', 'Confirmed', 'geometry']
ncov = pd.concat([world_ncov[cols], us_state_ncov[cols], china_provinces_ncov[cols], china_cities_ncov[cols]],
ignore_index=True)
return ncov
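# Editor's sketch (not part of the original script): build_ncov_geodf expects a
# per-day frame with 'Province/State', 'Country/Region' and 'Confirmed' columns,
# as implied by the merges above. The CSV path below is a hypothetical example.
def _example_build_ncov_geodf(day_csv_path='data/02-01-2020.csv'):
    day_df = pd.read_csv(day_csv_path)
    ncov = build_ncov_geodf(day_df)
    # quick choropleth of confirmed cases, if a plotting backend is available:
    # ncov.plot(column='Confirmed', legend=True)
    return ncov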
def create_location(row):
if | pd.isna(row['Province/State']) | pandas.isna |
import pandas as pd
import json
import numpy as np
from collections import Counter
from operator import itemgetter
def create_edgelist(transactions_file, clients_file, companies_file, atms_file):
transactions = pd.read_csv(transactions_file)
clients = | pd.read_csv(clients_file) | pandas.read_csv |
from pathlib import Path
import os
import sys
os.environ['DISPLAY'] = ':1'
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.stats.multitest import multipletests
import scipy.stats as stats
import seaborn as sns
import cytometer.stats
import cytometer.data
# whether to plot and save figures
SAVE_FIGS = False
# script name to identify this experiment
experiment_id = 'arl15del2_exp_0003_phenotyping'
# cross-platform home directory
home = str(Path.home())
sys.path.extend([os.path.join(home, 'Software/cytometer')])
# data paths
root_data_dir = os.path.join(home, 'Data/cytometer_data/arl15del2')
figures_dir = os.path.join(home, 'GoogleDrive/Research/Papers/20211205_Arl15del2_wat_phenotyping/figures')
# load cell data
cell_data_file = os.path.join(root_data_dir, 'Arl15_filtered.csv')
df_all = pd.read_csv(cell_data_file, header=0, sep=',', index_col=False)
# load metainformation
meta_data_file = os.path.join(root_data_dir, 'Arl15-del2 Global KO iWAT and gWAT segmentation analysis.xlsx')
metainfo = pd.read_excel(meta_data_file)
metainfo['Genotype'] = metainfo['Genotype'].astype(
pd.api.types.CategoricalDtype(categories=['Arl15-Del2:WT', 'Arl15-Del2:Het'], ordered=True))
# rename variables with whitespaces to avoid having to use Q("Age died") syntax everywhere
metainfo = metainfo.rename(columns={'Date of death': 'Date_of_death', 'Date of birth': 'Date_of_birth',
'Age died': 'Age_died', 'Fat mass': 'Fat_mass', 'Lean mass': 'Lean_mass'})
# scale BW to avoid large condition numbers
BW_mean = metainfo['BW'].mean()
metainfo['BW__'] = metainfo['BW'] / BW_mean
## effect of cull age on body weight
########################################################################################################################
bw_model = sm.OLS.from_formula('BW ~ C(Age_died)', data=metainfo).fit()
print(bw_model.summary())
print(bw_model.pvalues)
## effect of genotype on body weight
########################################################################################################################
bw_model = sm.OLS.from_formula('BW ~ C(Genotype)', data=metainfo).fit()
print(bw_model.summary())
print(bw_model.pvalues)
if SAVE_FIGS:
plt.clf()
# swarm plot of body weight
ax = sns.swarmplot(x='Genotype', y='BW', data=metainfo, dodge=True, palette=['C0', 'C1'], s=10)
plt.xlabel('')
plt.ylabel('Body weight (g)', fontsize=14)
plt.tick_params(labelsize=14)
plt.xticks([0, 1], labels=['WT', 'Het'])
# mean values
plt.plot([-0.10, 0.10], [bw_model.params['Intercept'], ] * 2, 'k', linewidth=2)
plt.plot([0.90, 1.10], [bw_model.params['Intercept'] + bw_model.params['C(Genotype)[T.Arl15-Del2:Het]'], ] * 2, 'k',
linewidth=2)
# bracket with p-value
plt.plot([0.0, 0.0, 1.0, 1.0], [60, 62, 62, 60], 'k', lw=1.5)
pval = bw_model.pvalues['C(Genotype)[T.Arl15-Del2:Het]']
pval_text = '{0:.3f}'.format(pval) + ' ' + cytometer.stats.pval_to_asterisk(pval)
plt.text(0.5, 62.5, pval_text, ha='center', va='bottom', fontsize=14)
plt.ylim(35, 65)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'arl15del2_exp_0003_paper_figures_swarm_bw_genotype.png'))
plt.savefig(os.path.join(figures_dir, 'arl15del2_exp_0003_paper_figures_swarm_bw_genotype.svg'))
## effect of genotype on fat percent and lean mass percent
########################################################################################################################
fatmass_model = sm.OLS.from_formula('Fat_mass ~ BW__ * C(Genotype)', data=metainfo).fit()
print(fatmass_model.summary())
# once we see that the condition number size is due to the scaling of BW, and not collinearity, we recompute the model
fatmass_model = sm.OLS.from_formula('Fat_mass ~ BW * C(Genotype)', data=metainfo).fit()
print(fatmass_model.summary())
leanmass_model = sm.OLS.from_formula('Lean_mass ~ BW__ * C(Genotype)', data=metainfo).fit()
print(leanmass_model.summary())
# once we see that the condition number size is due to the scaling of BW, and not collinearity, we recompute the model
leanmass_model = sm.OLS.from_formula('Lean_mass ~ BW * C(Genotype)', data=metainfo).fit()
print(leanmass_model.summary())
# null models (Genotypes pooled together)
fatmass_model_null = sm.OLS.from_formula('Fat_mass ~ BW', data=metainfo).fit()
leanmass_model_null = sm.OLS.from_formula('Lean_mass ~ BW', data=metainfo).fit()
print(fatmass_model_null.summary())
print(leanmass_model_null.summary())
# compute LRTs and extract p-values and LRs
lrt = pd.DataFrame(columns=['lr', 'pval', 'pval_ast'])
lr, pval = cytometer.stats.lrtest(fatmass_model_null.llf, fatmass_model.llf)
lrt.loc['fatmass_model', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(leanmass_model_null.llf, leanmass_model.llf)
lrt.loc['leanmass_model', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
# multitest correction using Benjamini-Krieger-Yekutieli
_, lrt['pval_adj'], _, _ = multipletests(lrt['pval'], method='fdr_tsbky', alpha=0.05, returnsorted=False)
lrt['pval_adj_ast'] = cytometer.stats.pval_to_asterisk(lrt['pval_adj'])
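# Editor's illustrative sketch (not the actual cytometer.stats implementation): the
# likelihood-ratio tests above reduce to LR = 2 * (llf_alt - llf_null) with a
# chi-squared p-value, where the degrees of freedom equal the number of extra
# parameters in the alternative model (two here: the genotype main effect and its
# interaction with BW).
def _lrtest_sketch(llf_null, llf_alt, dof=2):
    lr = 2.0 * (llf_alt - llf_null)
    pval = stats.chi2.sf(lr, dof)
    return lr, pval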
# check that just fat mass vs. Genotype doesn't show any effect, so the BW variable is needed
print(sm.OLS.from_formula('Fat_mass ~ Genotype', data=metainfo).fit().summary())
if SAVE_FIGS:
lrt.to_csv(os.path.join(figures_dir, 'arl15del2_exp_0003_fatmass_leanmass_models_lrt.csv'), na_rep='nan')
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([6.4, 2.4])
plt.subplot(121)
cytometer.stats.plot_linear_regression(fatmass_model, metainfo, 'BW',
other_vars={'Genotype': 'Arl15-Del2:WT'},
dep_var='Fat_mass', c='C0', marker='x',
line_label='WT')
cytometer.stats.plot_linear_regression(fatmass_model, metainfo, 'BW',
other_vars={'Genotype': 'Arl15-Del2:Het'},
dep_var='Fat_mass', c='C1', marker='o',
line_label='Het')
cytometer.stats.plot_linear_regression(fatmass_model_null, metainfo, 'BW',
c='k--', line_label='All')
plt.xlim(35, 62)
plt.ylim(17, 37)
plt.tick_params(labelsize=14)
plt.title('Fat mass', fontsize=14)
plt.xlabel('Body weight (g)', fontsize=14)
plt.ylabel('Weight (g)', fontsize=14)
plt.legend(loc='upper left')
plt.subplot(122)
cytometer.stats.plot_linear_regression(leanmass_model, metainfo, 'BW',
other_vars={'Genotype': 'Arl15-Del2:WT'},
dep_var='Lean_mass', c='C0', marker='x',
line_label='WT')
cytometer.stats.plot_linear_regression(leanmass_model, metainfo, 'BW',
other_vars={'Genotype': 'Arl15-Del2:Het'},
dep_var='Lean_mass', c='C1', marker='o',
line_label='Het')
cytometer.stats.plot_linear_regression(leanmass_model_null, metainfo, 'BW',
c='k--', line_label='All')
plt.xlim(35, 62)
plt.ylim(12, 25)
plt.tick_params(labelsize=14)
plt.title('Lean mass', fontsize=14)
plt.xlabel('Body weight (g)', fontsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'arl15del2_exp_0003_paper_figures_fatmass_leanmass_models.png'))
plt.savefig(os.path.join(figures_dir, 'arl15del2_exp_0003_paper_figures_fatmass_leanmass_models.jpg'))
plt.savefig(os.path.join(figures_dir, 'arl15del2_exp_0003_paper_figures_fatmass_leanmass_models.svg'))
## effect of genotype and BW on depot weight
########################################################################################################################
# DW ~ BW * genotype
gwat_model = sm.OLS.from_formula('gWAT ~ BW__ * C(Genotype)', data=metainfo).fit()
print(gwat_model.summary())
gwat_model = sm.OLS.from_formula('gWAT ~ BW * C(Genotype)', data=metainfo).fit()
print(gwat_model.summary())
iwat_model = sm.OLS.from_formula('iWAT ~ BW__ * C(Genotype)', data=metainfo).fit()
print(iwat_model.summary())
iwat_model = sm.OLS.from_formula('iWAT ~ BW * C(Genotype)', data=metainfo).fit()
print(iwat_model.summary())
# null models (Genotypes pooled together)
gwat_model_null = sm.OLS.from_formula('gWAT ~ BW', data=metainfo).fit()
iwat_model_null = sm.OLS.from_formula('iWAT ~ BW', data=metainfo).fit()
# mean difference of BW vs. Genotype
gwat_model_meandiff = sm.OLS.from_formula('gWAT ~ C(Genotype)', data=metainfo).fit()
iwat_model_meandiff = sm.OLS.from_formula('iWAT ~ C(Genotype)', data=metainfo).fit()
print(gwat_model_null.summary())
print(iwat_model_null.summary())
print(gwat_model_meandiff.summary())
print(iwat_model_meandiff.summary())
# compute LRTs and extract p-values and LRs
lrt = pd.DataFrame(columns=['lr', 'pval', 'pval_ast'])
lr, pval = cytometer.stats.lrtest(gwat_model_null.llf, gwat_model.llf)
lrt.loc['gwat_model', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
lr, pval = cytometer.stats.lrtest(iwat_model_null.llf, iwat_model.llf)
lrt.loc['iwat_model', :] = (lr, pval, cytometer.stats.pval_to_asterisk(pval))
# multitest correction using Benjamini-Krieger-Yekutieli
_, lrt['pval_adj'], _, _ = multipletests(lrt['pval'], method='fdr_tsbky', alpha=0.05, returnsorted=False)
lrt['pval_adj_ast'] = cytometer.stats.pval_to_asterisk(lrt['pval_adj'])
# check that just fat mass vs. Genotype doesn't show any effect, so the BW variable is needed
print(sm.OLS.from_formula('gWAT ~ Genotype', data=metainfo).fit().summary())
print(sm.OLS.from_formula('iWAT ~ Genotype', data=metainfo).fit().summary())
# get a p-value for the slope of the inguinal DW model for Hets
model_names = ['iwat_model']
extra_hypotheses = 'BW+BW:C(Genotype)[T.Arl15-Del2:Het]'
df_coeff, df_ci_lo, df_ci_hi, df_pval = \
cytometer.stats.models_coeff_ci_pval(
[iwat_model],
extra_hypotheses=extra_hypotheses,
model_names=model_names)
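# Editor's illustrative aside (not part of the original analysis): the extra
# hypothesis 'BW+BW:C(Genotype)[T.Arl15-Del2:Het]' is the inguinal DW slope for Het
# animals (WT slope plus the interaction). Its point estimate and standard error can
# also be recovered by hand from the fitted parameters and covariance matrix.
_bw_name = 'BW'
_bw_het_name = 'BW:C(Genotype)[T.Arl15-Del2:Het]'
_het_slope = iwat_model.params[_bw_name] + iwat_model.params[_bw_het_name]
_cov = iwat_model.cov_params()
_het_slope_se = np.sqrt(_cov.loc[_bw_name, _bw_name]
                        + _cov.loc[_bw_het_name, _bw_het_name]
                        + 2 * _cov.loc[_bw_name, _bw_het_name])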
if SAVE_FIGS:
plt.clf()
plt.gcf().set_size_inches([6.4, 2.4])
plt.subplot(121)
cytometer.stats.plot_linear_regression(gwat_model, metainfo, 'BW',
other_vars={'Genotype': 'Arl15-Del2:WT'},
dep_var='gWAT', c='C0', marker='x',
line_label='WT')
cytometer.stats.plot_linear_regression(gwat_model, metainfo, 'BW',
other_vars={'Genotype': 'Arl15-Del2:Het'},
dep_var='gWAT', c='C1', marker='o',
line_label='Het')
cytometer.stats.plot_linear_regression(gwat_model_null, metainfo, 'BW',
c='k--', line_label='All')
plt.xlim(35, 62)
plt.ylim(1.5, 3.0)
plt.tick_params(labelsize=14)
plt.title('Gonadal', fontsize=14)
plt.xlabel('Body weight (g)', fontsize=14)
plt.ylabel('Depot weight (g)', fontsize=14)
plt.legend(loc='upper left')
plt.subplot(122)
cytometer.stats.plot_linear_regression(iwat_model, metainfo, 'BW',
other_vars={'Genotype': 'Arl15-Del2:WT'},
dep_var='iWAT', c='C0', marker='x',
line_label='WT')
cytometer.stats.plot_linear_regression(iwat_model, metainfo, 'BW',
other_vars={'Genotype': 'Arl15-Del2:Het'},
dep_var='iWAT', c='C1', marker='o',
line_label='Het')
cytometer.stats.plot_linear_regression(iwat_model_null, metainfo, 'BW',
c='k--', line_label='All')
plt.title('Inguinal', fontsize=14)
plt.xlim(35, 62)
plt.ylim(1.1, 2.2)
plt.tick_params(labelsize=14)
plt.xlabel('Body weight (g)', fontsize=14)
plt.tight_layout()
plt.savefig(os.path.join(figures_dir, 'arl15del2_exp_0003_paper_figures_dw_models.png'))
plt.savefig(os.path.join(figures_dir, 'arl15del2_exp_0003_paper_figures_dw_models.jpg'))
plt.savefig(os.path.join(figures_dir, 'arl15del2_exp_0003_paper_figures_dw_models.svg'))
## effect of genotype and DW on cell area quartiles
########################################################################################################################
# compute cell quartiles for each mouse
# (only mode, 25%-, 50%- and 75%-quantiles for illustration purposes and debugging)
# quantiles computed: 0.00, 0.05, 0.10, ..., 0.95, 1.00
quantiles = np.linspace(0, 1, 21)
# indices of the quantiles we are going to model
i_q1, i_q2, i_q3 = [5, 10, 15] # Q1, Q2, Q3
# extract ID (38.1e) from Animal (ARL15-DEL2-EM1-B6N/38.1e), so that we can search for the ID in the histology file name
metainfo['id'] = [x.split('/')[-1] for x in metainfo['Animal']]
# create dataframe with one row per mouse/depot, and the area quantiles
df_slides = pd.DataFrame()
slide_names = [x.lower() for x in df_all.columns]
for i in range(metainfo.shape[0]):
print('Mouse: ' + metainfo.loc[i, 'Animal'])
for depot in ['gwat', 'iwat']:
print('\tDepot: ' + depot)
# get list of all the columns of cell areas that correspond to this mouse/depot
i_histo = [(metainfo.loc[i, 'id'] in x) and (depot in x) for x in slide_names]
[print('\t\tslide: ' + x) for x in df_all.columns[i_histo]]
# concatenate all the cells for this animal
areas_all = df_all[df_all.columns[i_histo]].to_numpy().flatten()
areas_all = areas_all[~np.isnan(areas_all)]
# compute quantiles of the pooled cell population
areas_at_quantiles = stats.mstats.hdquantiles(areas_all, prob=quantiles, axis=0)
# name of the histology file, converted to lowercase so that e.g. 38.1E is identified as mouse 38.1e
# we can use the same slide name for all slides, because they all have the same mouse ID and depot tag
histo_string = df_all.columns[i_histo][0].lower()
df_row = cytometer.data.tag_values_with_mouse_info(metainfo=metainfo, s=histo_string,
values=[areas_at_quantiles[i_q1]], values_tag='area_Q1',
tags_to_keep=['id', 'Genotype', 'gWAT', 'iWAT'])
df_row['area_Q2'] = areas_at_quantiles[i_q2]
df_row['area_Q3'] = areas_at_quantiles[i_q3]
# check whether the slide is gWAT or iWAT
if 'gwat' in histo_string:
df_row['depot'] = 'gWAT'
df_row['DW'] = df_row['gWAT']
elif 'iwat' in histo_string:
df_row['depot'] = 'iWAT'
df_row['DW'] = df_row['iWAT']
else:
raise ValueError('Histology slide cannot be identified as either gWAT or iWAT')
df_slides = df_slides.append(df_row, ignore_index=True)
# pearson correlation coefficients for data stratified by depot
rho_df = pd.DataFrame()
rho = df_slides[(df_slides['depot'] == 'gWAT')][['area_Q1', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q1', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'gWAT')][['area_Q2', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q2', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'gWAT')][['area_Q3', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q3', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT')][['area_Q1', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q1', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT')][['area_Q2', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q2', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT')][['area_Q3', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q3', 'rho': rho}, ignore_index=True)
print(rho_df)
# pearson correlation coefficients for data stratified by depot and genotype
rho_df = pd.DataFrame()
rho = df_slides[(df_slides['depot'] == 'gWAT') & (df_slides['Genotype'] == 'Arl15-Del2:WT')][['area_Q1', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q1_wt', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'gWAT') & (df_slides['Genotype'] == 'Arl15-Del2:Het')][['area_Q1', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q1_het', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'gWAT') & (df_slides['Genotype'] == 'Arl15-Del2:WT')][['area_Q2', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q2_wt', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'gWAT') & (df_slides['Genotype'] == 'Arl15-Del2:Het')][['area_Q2', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q2_het', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'gWAT') & (df_slides['Genotype'] == 'Arl15-Del2:WT')][['area_Q3', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q3_wt', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'gWAT') & (df_slides['Genotype'] == 'Arl15-Del2:Het')][['area_Q3', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'gwat_q3_het', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT') & (df_slides['Genotype'] == 'Arl15-Del2:WT')][['area_Q1', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q1_wt', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT') & (df_slides['Genotype'] == 'Arl15-Del2:Het')][['area_Q1', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q1_het', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT') & (df_slides['Genotype'] == 'Arl15-Del2:WT')][['area_Q2', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q2_wt', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT') & (df_slides['Genotype'] == 'Arl15-Del2:Het')][['area_Q2', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q2_het', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT') & (df_slides['Genotype'] == 'Arl15-Del2:WT')][['area_Q3', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q3_wt', 'rho': rho}, ignore_index=True)
rho = df_slides[(df_slides['depot'] == 'iWAT') & (df_slides['Genotype'] == 'Arl15-Del2:Het')][['area_Q3', 'DW']].corr().iloc[0, 1]
rho_df = rho_df.append({'model': 'iwat_q3_het', 'rho': rho}, ignore_index=True)
# area quartiles mean differences (WT vs. Het)
gwat_q1_meandiff_model = sm.OLS.from_formula('area_Q1 ~ C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
gwat_q2_meandiff_model = sm.OLS.from_formula('area_Q2 ~ C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
gwat_q3_meandiff_model = sm.OLS.from_formula('area_Q3 ~ C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
iwat_q1_meandiff_model = sm.OLS.from_formula('area_Q1 ~ C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
iwat_q2_meandiff_model = sm.OLS.from_formula('area_Q2 ~ C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
iwat_q3_meandiff_model = sm.OLS.from_formula('area_Q3 ~ C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
print(gwat_q1_meandiff_model.summary())
print(gwat_q2_meandiff_model.summary())
print(gwat_q3_meandiff_model.summary())
print(iwat_q1_meandiff_model.summary())
print(iwat_q2_meandiff_model.summary())
print(iwat_q3_meandiff_model.summary())
df_meandiff = pd.DataFrame()
df_meandiff = \
df_meandiff.append(ignore_index=True,
other={'model': 'gwat_q1_meandiff',
'meandiff': np.round(gwat_q1_meandiff_model.params['C(Genotype)[T.Arl15-Del2:Het]']),
't': gwat_q1_meandiff_model.tvalues['C(Genotype)[T.Arl15-Del2:Het]'].round(2),
'pval': gwat_q1_meandiff_model.pvalues['C(Genotype)[T.Arl15-Del2:Het]']})
df_meandiff = \
df_meandiff.append(ignore_index=True,
other={'model': 'gwat_q2_meandiff',
'meandiff': np.round(gwat_q2_meandiff_model.params['C(Genotype)[T.Arl15-Del2:Het]']),
't': gwat_q2_meandiff_model.tvalues['C(Genotype)[T.Arl15-Del2:Het]'].round(2),
'pval': gwat_q2_meandiff_model.pvalues['C(Genotype)[T.Arl15-Del2:Het]']})
df_meandiff = \
df_meandiff.append(ignore_index=True,
other={'model': 'gwat_q3_meandiff',
'meandiff': np.round(gwat_q3_meandiff_model.params['C(Genotype)[T.Arl15-Del2:Het]']),
't': gwat_q3_meandiff_model.tvalues['C(Genotype)[T.Arl15-Del2:Het]'].round(2),
'pval': gwat_q3_meandiff_model.pvalues['C(Genotype)[T.Arl15-Del2:Het]']})
df_meandiff = \
df_meandiff.append(ignore_index=True,
other={'model': 'iwat_q1_meandiff',
'meandiff': np.round(iwat_q1_meandiff_model.params['C(Genotype)[T.Arl15-Del2:Het]']),
't': iwat_q1_meandiff_model.tvalues['C(Genotype)[T.Arl15-Del2:Het]'].round(2),
'pval': iwat_q1_meandiff_model.pvalues['C(Genotype)[T.Arl15-Del2:Het]']})
df_meandiff = \
df_meandiff.append(ignore_index=True,
other={'model': 'iwat_q2_meandiff',
'meandiff': np.round(iwat_q2_meandiff_model.params['C(Genotype)[T.Arl15-Del2:Het]']),
't': iwat_q2_meandiff_model.tvalues['C(Genotype)[T.Arl15-Del2:Het]'].round(2),
'pval': iwat_q2_meandiff_model.pvalues['C(Genotype)[T.Arl15-Del2:Het]']})
df_meandiff = \
df_meandiff.append(ignore_index=True,
other={'model': 'iwat_q3_meandiff',
'meandiff': np.round(iwat_q3_meandiff_model.params['C(Genotype)[T.Arl15-Del2:Het]']),
't': iwat_q3_meandiff_model.tvalues['C(Genotype)[T.Arl15-Del2:Het]'].round(2),
'pval': iwat_q3_meandiff_model.pvalues['C(Genotype)[T.Arl15-Del2:Het]']})
df_meandiff['pval_ast'] = cytometer.stats.pval_to_asterisk(df_meandiff['pval'])
# multitest correction using Benjamini-Krieger-Yekutieli
_, df_meandiff['pval_adj'], _, _ = multipletests(df_meandiff['pval'], method='fdr_tsbky', alpha=0.05, returnsorted=False)
df_meandiff['pval_adj_ast'] = cytometer.stats.pval_to_asterisk(df_meandiff['pval_adj'])
# fit models of area quartiles vs. depot weight * genotype
gwat_q1_model = sm.OLS.from_formula('area_Q1 ~ DW * C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
gwat_q2_model = sm.OLS.from_formula('area_Q2 ~ DW * C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
gwat_q3_model = sm.OLS.from_formula('area_Q3 ~ DW * C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
iwat_q1_model = sm.OLS.from_formula('area_Q1 ~ DW * C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
iwat_q2_model = sm.OLS.from_formula('area_Q2 ~ DW * C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
iwat_q3_model = sm.OLS.from_formula('area_Q3 ~ DW * C(Genotype)', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
# null models
gwat_q1_null_model = sm.OLS.from_formula('area_Q1 ~ DW', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
gwat_q2_null_model = sm.OLS.from_formula('area_Q2 ~ DW', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
gwat_q3_null_model = sm.OLS.from_formula('area_Q3 ~ DW', data=df_slides,
subset=df_slides['depot'] == 'gWAT').fit()
iwat_q1_null_model = sm.OLS.from_formula('area_Q1 ~ DW', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
iwat_q2_null_model = sm.OLS.from_formula('area_Q2 ~ DW', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
iwat_q3_null_model = sm.OLS.from_formula('area_Q3 ~ DW', data=df_slides,
subset=df_slides['depot'] == 'iWAT').fit()
print(gwat_q1_model.summary())
print(gwat_q2_model.summary())
print(gwat_q3_model.summary())
print(iwat_q1_model.summary())
print(iwat_q2_model.summary())
print(iwat_q3_model.summary())
print(gwat_q1_null_model.summary())
print(gwat_q2_null_model.summary())
print(gwat_q3_null_model.summary())
print(iwat_q1_null_model.summary())
print(iwat_q2_null_model.summary())
print(iwat_q3_null_model.summary())
# compute LRTs and extract p-values and LRs
lrt = | pd.DataFrame(columns=['lr', 'pval', 'pval_ast']) | pandas.DataFrame |
import os
import tempfile
import path
import functools
from itertools import islice
import pandas as pd
import numpy as np
from trumania.core.random_generators import SequencialGenerator, NumpyRandomGenerator, ConstantGenerator, seed_provider
from trumania.core.random_generators import DependentTriggerGenerator, FakerGenerator, Generator
def test_constant_generator_should_produce_constant_values():
tested = ConstantGenerator(value="c")
assert [] == tested.generate(size=0)
assert ["c"] == tested.generate(size=1)
assert ["c", "c", "c", "c", "c"] == tested.generate(size=5)
def test_numpy_random_generator_should_delegate_to_numpy_correctly():
# basic "smoke" test, if it does not crash it at least proves it's able
# to load the appropriate method
tested = NumpyRandomGenerator(method="normal", loc=10, scale=4, seed=1)
assert len(tested.generate(size=10)) == 10
def test_seeder_should_be_deterministic():
"""
    makes sure the seed provider always yields the same sequence of seeds
"""
master_seed = 12345
seeder1 = seed_provider(master_seed)
seeder2 = seed_provider(master_seed)
assert list(islice(seeder1, 1000)) == list(islice(seeder2, 1000))
def test_depend_trigger_should_trigger_given_constant_value():
    # returns 6 hard-coded ones and zeros
def fake_mapper(x):
return [1, 1, 0, 0, 1, 0]
g = DependentTriggerGenerator(value_to_proba_mapper=fake_mapper)
triggers = g.generate(observations=pd.Series([10, 20, 30, 0, 1, 2]))
# because the fake_mapper returns fake values, we should always have the
# following triggers, no matter what the internal uniform distro provided
assert triggers.tolist() == [True, True, False, False, True, False]
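# Editor's sketch of the triggering logic exercised above (not the actual trumania
# implementation): the mapper turns observations into probabilities and each row
# triggers when an independent uniform draw falls below its probability.
def _trigger_sketch(observations, value_to_proba_mapper, seed=1234):
    probas = np.asarray(value_to_proba_mapper(observations), dtype=float)
    draws = np.random.RandomState(seed).uniform(size=len(probas))
    return pd.Series(draws < probas, index=observations.index)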
def test_sequencial_generator_should_create_unique_values():
tested = SequencialGenerator(start=10, prefix="test_p_", max_length=10)
sizes = [100, 200, 300, 400, 500]
sets = [set(tested.generate(size)) for size in sizes]
    # generated values should be unique both within and across the generated sets
all_values = functools.reduce(lambda s1, s2: s1 | s2, sets)
assert len(all_values) == np.sum(sizes)
def test_random_generator_should_provide_correct_amount_of_single_values():
tested = NumpyRandomGenerator(method="gamma", scale=10, shape=1.8, seed=1)
genops = tested.ops.generate(named_as="rand")
story_data = pd.DataFrame(
np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
result, logs = genops(story_data)
assert result.columns.tolist() == ["A", "B", "C", "D", "E", "rand"]
# should be float and not list of values
assert result["rand"].dtype == float
def test_random_generator_should_provide_correct_amount_of_list_of_values():
tested = NumpyRandomGenerator(method="gamma", scale=10, shape=1.8, seed=1)
story_data = pd.DataFrame(
np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"],
)
story_data["how_many"] = | pd.Series([10, 20, 30, 40, 50, 60, 70, 80, 90, 100]) | pandas.Series |
import pandas as pd
import numpy as np
from datetime import datetime
from multiprocessing import Pool
from functools import partial
from pathos import pools as pp
import pickle as pkl
from UserCentricMeasurements import *
from RepoCentricMeasurements import *
from CommunityCentricMeasurements import *
from TEMeasurements import *
from collections import defaultdict
import jpype
import json
class Measurements(UserCentricMeasurements, RepoCentricMeasurements, TEMeasurements, CommunityCentricMeasurements):
def __init__(self, dfLoc, interested_repos=[], interested_users=[], metaRepoData=False, metaUserData=False,
repoActorsFile='data/filtUsers-test.pkl',reposFile='data/filtRepos-test.pkl',topNodes=[],topEdges=[],
previousActionsFile='',community_dictionary='data/communities.pkl',te_config='te_params_dry_run2.json'):
super(Measurements, self).__init__()
try:
#check if input is a data frame
dfLoc.columns
df = dfLoc
except:
#if not it should be a csv file path
df = pd.read_csv(dfLoc)
self.contribution_events = ["PullRequestEvent", "PushEvent", "IssuesEvent","IssueCommentEvent","PullRequestReviewCommentEvent","CommitCommentEvent","CreateEvent"]
self.popularity_events = ['WatchEvent','ForkEvent']
print('preprocessing...')
self.main_df = self.preprocess(df)
print('splitting optional columns...')
        # store action and merged columns in a separate data frame that is not used for most measurements
if len(self.main_df.columns) == 6:
self.main_df_opt = self.main_df.copy()[['action','merged']]
self.main_df_opt['merged'] = self.main_df_opt['merged'].astype(bool)
self.main_df = self.main_df.drop(['action','merged'],axis=1)
else:
self.main_df_opt = None
#For repoCentric
print('getting selected repos...')
self.selectedRepos = self.getSelectRepos(interested_repos) #Dictionary of selected repos index == repoid
#For userCentric
self.selectedUsers = self.main_df[self.main_df.user.isin(interested_users)]
        print('processing repo metadata...')
#read in external metadata files
#repoMetaData format - full_name_h,created_at,owner.login_h,language
#userMetaData format - login_h,created_at,location,company
if metaRepoData != False:
self.useRepoMetaData = True
self.repoMetaData = self.preprocessRepoMeta(pd.read_csv(metaRepoData))
else:
self.useRepoMetaData = False
        print('processing user metadata...')
if metaUserData != False:
self.useUserMetaData = True
self.userMetaData = self.preprocessUserMeta(pd.read_csv(metaUserData))
else:
self.useUserMetaData = False
#For Community
print('getting communities...')
self.communities = self.getCommunities(path=community_dictionary)
        # read in previous event counts from an external file (used only for one measurement)
try:
print('reading previous counts...')
self.previous_event_counts = pd.read_csv(previousActionsFile)
except:
self.previous_event_counts = None
#For TE
print('starting jvm...')
if not jpype.isJVMStarted():
jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Djava.class.path=" + "infodynamics.jar")
self.top_users = topNodes
self.top_edges = topEdges
#read pkl files which define nodes of interest for TE measurements
self.repo_actors = self.readPickleFile(repoActorsFile)
self.repo_groups = self.readPickleFile(reposFile)
#set TE parameters
with open(te_config,'rb') as f:
te_params = json.load(f)
self.startTime = pd.Timestamp(te_params['startTime'])
self.binSize= te_params['binSize']
self.teThresh = te_params['teThresh']
self.delayUnits = np.array(te_params['delayUnits'])
self.starEvent = te_params['starEvent']
self.otherEvents = te_params['otherEvents']
self.kE = te_params['kE']
self.kN = te_params['kN']
self.nReps = te_params['nReps']
self.bGetTS = te_params['bGetTS']
def preprocess(self,df):
#edit columns, convert date, sort by date
if df.columns[0] == '_id':
del df['_id']
if len(df.columns) == 4:
df.columns = ['time', 'event', 'user', 'repo']
else:
df.columns = ['time', 'event', 'user', 'repo','action','merged']
df = df[df.event.isin(self.popularity_events + self.contribution_events)]
df['time'] = pd.to_datetime(df['time'])
df = df.sort_values(by='time')
df = df.assign(time=df.time.dt.floor('h'))
return df
def preprocessRepoMeta(self,df):
try:
df.columns = ['repo','created_at','owner_id','language']
except:
df.columns = ['created_at','owner_id','repo']
df = df[df.repo.isin(self.main_df.repo.values)]
df['created_at'] = pd.to_datetime(df['created_at'])
#df = df.drop_duplicates('repo')
return df
def preprocessUserMeta(self,df):
try:
df.columns = ['user','created_at','location','company']
except:
df.columns = ['user','created_at','city','country','company']
df = df[df.user.isin(self.main_df.user.values)]
df['created_at'] = | pd.to_datetime(df['created_at']) | pandas.to_datetime |
"""
Clean a DataFrame column containing text data.
"""
import re
import string
from functools import partial, update_wrapper
from typing import Any, Callable, Dict, List, Optional, Set, Union
from unicodedata import normalize
import dask.dataframe as dd
import numpy as np
import pandas as pd
from ..assets.english_stopwords import english_stopwords
from .utils import NULL_VALUES, to_dask
REGEX_BRACKETS = {
"angle": re.compile(r"(\<)[^<>]*(\>)"),
"curly": re.compile(r"(\{)[^{}]*(\})"),
"round": re.compile(r"(\()[^()]*(\))"),
"square": re.compile(r"(\[)[^\[\]]*(\])"),
}
REGEX_DIGITS = re.compile(r"\d+")
REGEX_DIGITS_BLOCK = re.compile(r"\b\d+\b")
REGEX_HTML = re.compile(r"<[A-Za-z/][^>]*>|&(?:[a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});")
REGEX_PUNCTUATION = re.compile(rf"[{re.escape(string.punctuation)}]")
REGEX_URL = re.compile(r"(?:https?://|www\.)\S+")
REGEX_WHITESPACE = re.compile(r"[\n\t]|[ ]{2,}")
def clean_text(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
pipeline: Optional[List[Dict[str, Any]]] = None,
stopwords: Optional[Set[str]] = None,
) -> pd.DataFrame:
"""
Clean text data in a DataFrame column.
Read more in the :ref:`User Guide <clean_text_user_guide>`.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
column
The name of the column containing text data.
pipeline
A list of cleaning functions to be applied to the column. If None,
use the default pipeline. See the :ref:`User Guide <clean_text_custom_pipeline>`
for more information on customizing the pipeline.
(default: None)
stopwords
A set of words to be removed from the column. If None, use NLTK's
stopwords.
(default: None)
Examples
--------
Clean a column of text data using the default pipeline.
>>> df = pd.DataFrame({"text": ["This show was an amazing, fresh & innovative idea in the \
70's when it first aired."]})
>>> clean_text(df, 'text')
text
0 show amazing fresh innovative idea first aired
"""
df = to_dask(df)
pipe = _get_default_pipeline(stopwords) if not pipeline else _get_custom_pipeline(pipeline)
for func in pipe:
df[column] = df[column].apply(func, meta=object)
df = df.compute()
return df
def default_text_pipeline() -> List[Dict[str, Any]]:
"""
Return a list of dictionaries representing the functions in the default pipeline.
Use as a template for creating a custom pipeline.
Read more in the :ref:`User Guide <clean_text_user_guide>`.
Examples
--------
>>> default_text_pipeline()
[{'operator': 'fillna'}, {'operator': 'lowercase'}, {'operator': 'remove_digits'},
{'operator': 'remove_html'}, {'operator': 'remove_urls'}, {'operator': 'remove_punctuation'},
{'operator': 'remove_accents'}, {'operator': 'remove_stopwords', 'parameters':
{'stopwords': None}}, {'operator': 'remove_whitespace'}]
"""
return [
{"operator": "fillna"},
{"operator": "lowercase"},
{"operator": "remove_digits"},
{"operator": "remove_html"},
{"operator": "remove_urls"},
{"operator": "remove_punctuation"},
{"operator": "remove_accents"},
{"operator": "remove_stopwords", "parameters": {"stopwords": None}},
{"operator": "remove_whitespace"},
]
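def _example_custom_pipeline() -> List[Dict[str, Any]]:
    """
    Illustrative sketch only (not part of the library's public API): a shortened
    pipeline mixing built-in operators, one operator with parameters, and a
    user-defined callable, in the format accepted by the ``pipeline`` argument of
    :func:`clean_text`. The operators chosen here are an editor's example; pass it
    as ``clean_text(df, "text", pipeline=_example_custom_pipeline())``.
    """
    return [
        {"operator": "fillna", "parameters": {"value": ""}},
        {"operator": "lowercase"},
        {"operator": "remove_bracketed", "parameters": {"brackets": "round"}},
        {"operator": lambda text: str(text).replace("&", "and")},
        {"operator": "remove_whitespace"},
    ]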
def _get_default_pipeline(
stopwords: Optional[Set[str]] = None,
) -> List[Callable[..., Any]]:
"""
Return a list of functions defining the default pipeline.
"""
return [
_fillna,
_lowercase,
_remove_digits,
_remove_html,
_remove_urls,
_remove_punctuation,
_remove_accents,
lambda x: _remove_stopwords(x, stopwords),
_remove_whitespace,
]
def _get_custom_pipeline(pipeline: List[Dict[str, Any]]) -> List[Callable[..., Any]]:
"""
Return a list of functions defining a custom pipeline.
"""
func_dict = _get_func_dict()
custom_pipeline: List[Callable[..., Any]] = []
for component in pipeline:
# Check whether function is built in or user defined
operator = (
func_dict[component["operator"]]
if isinstance(component["operator"], str)
else component["operator"]
)
# Append the function to the pipeline
# If parameters are specified, create a partial function to lock in
# the values and prevent them from being overwritten in subsequent loops
if "parameters" in component:
custom_pipeline.append(_wrapped_partial(operator, component["parameters"]))
else:
custom_pipeline.append(operator)
return custom_pipeline
def _get_func_dict() -> Dict[str, Callable[..., Any]]:
"""
Return a mapping of strings to function names.
"""
return {
"fillna": _fillna,
"lowercase": _lowercase,
"sentence_case": _sentence_case,
"title_case": _title_case,
"uppercase": _uppercase,
"remove_accents": _remove_accents,
"remove_bracketed": _remove_bracketed,
"remove_digits": _remove_digits,
"remove_html": _remove_html,
"remove_prefixed": _remove_prefixed,
"remove_punctuation": _remove_punctuation,
"remove_stopwords": _remove_stopwords,
"remove_urls": _remove_urls,
"remove_whitespace": _remove_whitespace,
"replace_bracketed": _replace_bracketed,
"replace_digits": _replace_digits,
"replace_prefixed": _replace_prefixed,
"replace_punctuation": _replace_punctuation,
"replace_stopwords": _replace_stopwords,
"replace_text": _replace_text,
"replace_urls": _replace_urls,
}
def _fillna(text: Any, value: Any = np.nan) -> Any:
"""
Replace all null values with NaN (default) or the supplied value.
"""
return value if text in NULL_VALUES else str(text)
def _lowercase(text: Any) -> Any:
"""
Convert all characters to lowercase.
"""
return str(text).lower() if pd.notna(text) else text
def _sentence_case(text: Any) -> Any:
"""
Convert first character to uppercase and remaining to lowercase.
"""
return str(text).capitalize() if pd.notna(text) else text
def _title_case(text: Any) -> Any:
"""
Convert first character of each word to uppercase and remaining to lowercase.
"""
return str(text).title() if pd.notna(text) else text
def _uppercase(text: Any) -> Any:
"""
Convert all characters to uppercase.
"""
return str(text).upper() if pd.notna(text) else text
def _remove_accents(text: Any) -> Any:
"""
Remove accents (diacritic marks).
"""
return (
normalize("NFD", str(text)).encode("ascii", "ignore").decode("ascii")
if pd.notna(text)
else text
)
def _remove_bracketed(text: Any, brackets: Union[str, Set[str]], inclusive: bool = True) -> Any:
"""
Remove text between brackets.
Parameters
----------
brackets
The bracket style.
- "angle": <>
- "curly": {}
- "round": ()
- "square": []
inclusive
If True (default), remove the brackets along with the text in between.
Otherwise, keep the brackets.
"""
if pd.isna(text):
return text
text = str(text)
value = "" if inclusive else r"\g<1>\g<2>"
if isinstance(brackets, set):
for bracket in brackets:
text = re.sub(REGEX_BRACKETS[bracket], value, text)
else:
text = re.sub(REGEX_BRACKETS[brackets], value, text)
return text
def _remove_digits(text: Any) -> Any:
"""
Remove all digits.
"""
return re.sub(REGEX_DIGITS, "", str(text)) if pd.notna(text) else text
def _remove_html(text: Any) -> Any:
"""
Remove HTML tags.
"""
return re.sub(REGEX_HTML, "", str(text)) if pd.notna(text) else text
def _remove_prefixed(text: Any, prefix: Union[str, Set[str]]) -> Any:
"""
Remove substrings that start with the prefix(es).
"""
if pd.isna(text):
return text
text = str(text)
if isinstance(prefix, set):
for pre in prefix:
text = re.sub(rf"{pre}\S+", "", text)
else:
text = re.sub(rf"{prefix}\S+", "", text)
return text
def _remove_punctuation(text: Any) -> Any:
"""
Remove punctuation marks.
"""
return re.sub(REGEX_PUNCTUATION, " ", str(text)) if pd.notna(text) else text
def _remove_stopwords(text: Any, stopwords: Optional[Set[str]] = None) -> Any:
"""
Remove a set of words from the text.
If `stopwords` is None (default), use NLTK's stopwords.
"""
if pd.isna(text):
return text
stopwords = english_stopwords if not stopwords else stopwords
return " ".join(word for word in str(text).split() if word.lower() not in stopwords)
def _remove_urls(text: Any) -> Any:
"""
Remove URLS.
"""
return re.sub(REGEX_URL, "", str(text)) if pd.notna(text) else text
def _remove_whitespace(text: Any) -> Any:
"""
Remove extra spaces along with tabs and newlines.
"""
return re.sub(REGEX_WHITESPACE, " ", str(text)).strip() if | pd.notna(text) | pandas.notna |
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
     'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
     'mw days/therm', 'mw days (th', 'ermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized,
# Solar and Solar Project were not classified as these do not indicate if they
# are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and
# "steam and gas") were classified based on additional research of the plants
# on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
    'steam (see note 3)', 'mpc 50%share steam', '40% share steam',
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
    'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
    'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
    'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
    'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Making a dictionary of lists from the lists of plant_fuel strings to create
# a dictionary of plant fuel lists.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_fuel
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by Climate Policy Institute with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
    'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
    'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Making a dictionary of lists from the lists of construction_type strings to
# create a dictionary of construction type lists.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
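"""tuple: A tuple containing the FERC Form 714 tables that can be successfully
integrated into PUDL.
"""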
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
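"""tuple: A tuple containing the EIA 861 tables that can be successfully
integrated into PUDL.
"""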
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
    (15, '317',
     'Steam production: Asset retirement costs for steam production plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
    (19, '321',
     'Nuclear production: Structures and improvements (Major only)'),
    (20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
    (21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
    (22, '324',
     'Nuclear production: Accessory electric equipment (Major only)'),
    (23, '325',
     'Nuclear production: Miscellaneous power plant equipment (Major only)'),
    (24, '326',
     'Nuclear production: Asset retirement costs for nuclear production '
     'plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
    (34, '337',
     'Hydraulic production: Asset retirement costs for hydraulic production '
     'plant'),
    (35, 'subtotal_hydraulic_production',
     'Subtotal: Hydraulic Production Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
    (44, '347',
     'Other production: Asset retirement costs for other production plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
    (57, '359.1',
     'Transmission: Asset retirement costs for transmission plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
    (82, '385',
     'Regional transmission: Miscellaneous Regional Transmission and Market '
     'Operation Plant'),
    (83, '386',
     'Regional transmission: Asset Retirement Costs for Regional '
     'Transmission and Market Operation Plant'),
    (84, 'subtotal_regional_transmission',
     'Subtotal: Transmission and Market Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923: the Census Region a US state is located in.
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923.
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA's internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes for reading the CEMS CSVs
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables and associated
information for reading those tables into PUDL (values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing EPA IPM regions (keys) and lists of their
associated abbreviations (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of names of the glue tables that link EIA and FERC plants
and utilities to each other and to PUDL IDs.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years for
each data source that are able to be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and column names (values)
containing integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'balancing_authority_code': pd.StringDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'contact_firstname': pd.StringDtype(),
'contact_firstname2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'current_planned_operating_date': 'datetime64[ns]',
'deliver_power_transgrid': pd.BooleanDtype(),
'duct_burners': pd.BooleanDtype(),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.StringDtype(),
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
        'fuel_type_code_pudl': pd.StringDtype(),
# -*- coding: utf-8 -*-
"""
Spyder Editor
"""
# =============================================================================
# # imports and prep
# =============================================================================
# imports
from pathlib import Path
import pandas as pd
import numpy as np
# set path | *replace with your path*
file_path = Path("C:/Users/lmk3n/OneDrive/Documents/MSBX_5405/Final_proj/")
# read in files; convert to pandas df
calendar_file = file_path / "calendar.csv"
listings_file = file_path / "listings.csv"
reviews_file = file_path / "reviews.csv"
calendar = pd.read_csv(calendar_file)
listings = pd.read_csv(listings_file)
import numpy as np
import pandas as pd
import torch
from scipy.optimize import minimize
from scipy.special import loggamma, expit
from torch.nn.functional import log_softmax
from sim.Sample import get_batches
from sim.best_models import extract_best_run
from sim.EBayDataset import EBayDataset
from analyze.util import save_dict
from utils import load_inputs
from env.util import load_model
from inputs.const import NUM_OUT
from featnames import TEST, MODELS, CENSORED_MODELS, DISCRIM_MODELS, \
DISCRIM_MODEL, PLACEBO_MODEL, BYR_HIST_MODEL
def get_auc(s):
fp = s.index.values
fp_delta = fp[1:] - fp[:-1]
tp = s.values
tp_bar = (tp[1:] + tp[:-1]) / 2
auc = (fp_delta * tp_bar).sum()
return auc
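# Quick illustrative check (not part of the original analysis): the diagonal
# ROC curve of a no-skill classifier should integrate to roughly 0.5 under
# the trapezoid rule used in get_auc. The toy series below is hypothetical.
def _example_get_auc_diagonal():
    toy_roc = pd.Series([0.0, 0.5, 1.0], index=[0.0, 0.5, 1.0])
    return get_auc(toy_roc)  # ~0.5 for a random classifier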
def get_model_predictions(data):
"""
Returns predicted categorical distribution.
    :param EBayDataset data: dataset to generate predictions for
:return: np.array of probabilities
"""
# initialize neural net
net = load_model(data.name, verbose=False).to('cuda')
# get predictions from neural net
theta = []
batches = get_batches(data)
for b in batches:
for key, value in b.items():
if type(value) is dict:
b[key] = {k: v.to('cuda') for k, v in value.items()}
else:
b[key] = value.to('cuda')
theta.append(net(b['x']).cpu())
theta = torch.cat(theta)
# take softmax
theta = torch.cat((torch.zeros_like(theta), theta), dim=1)
p = np.exp(log_softmax(theta, dim=-1).numpy())
return p
def get_roc(model=None):
# vectors of predicted probabilities, by ground truth
data = EBayDataset(part=TEST, name=model)
y = data.d['y']
p = get_model_predictions(data)
p = p[:, 1]
p0, p1 = p[y == 0], p[y == 1]
# sweep out ROC curve
    s = pd.Series(dtype=float)
dim = np.arange(0, 1 + 1e-8, 0.001)
for tau in dim:
fp = np.sum(p0 > tau) / len(p0)
tp = np.sum(p1 > tau) / len(p1)
s.loc[fp] = tp
s = s.sort_index()
# check for doubles
assert len(s.index) == len(s.index.unique())
# print accuracy and auc
print('{} accuracy: {}'.format(model, ((p >= .5) == y).mean()))
print('{} AUC: {}'.format(model, get_auc(s)))
return s
def get_baserate(y, num_out, censored=False):
if not censored:
p = np.array([(y == i).mean() for i in range(num_out)])
p = p[p > 0]
return np.sum(p * np.log(p))
else:
counts = np.array([(y == i).sum() for i in range(num_out)],
dtype='float64')
cens = np.array([(y == i).sum() for i in range(-num_out, 0)],
dtype='float64')
for i in range(num_out):
counts[i:] += cens[i] / (num_out - i)
assert (np.abs(counts.sum() - len(y)) < 1e-8)
p = counts / counts.sum()
p_arrival = p[y[y >= 0]]
p_cens = np.array([p[i:].sum() for i in y if i < 0])
return np.log(np.concatenate([p_arrival, p_cens], axis=0)).mean()
def count_loss(theta, y):
# transformations
pi = expit(theta[0])
params = np.exp(theta[1:])
a, b = params
# zeros
num_zeros = (y == 0).sum()
lnl = num_zeros * np.log(pi + (1-pi) * a / (a + b))
# non-zeros
y1 = y[y > 0]
lnl += len(y1) * (np.log(1-pi) + np.log(a) + loggamma(a + b) - loggamma(b))
lnl += np.sum(loggamma(b + y1) - np.log(a + b + y1) - loggamma(a + b + y1))
return -lnl
def main():
d = dict()
# loop over models, save training curves to dictionary
for m in MODELS:
print(m)
# initialize dictionary
key = 'bar_training_{}'.format(m)
        d[key] = pd.Series(dtype=float)
# baserate
y = load_inputs(TEST, m)['y']
if m == BYR_HIST_MODEL:
res = minimize(lambda theta: count_loss(theta, y),
x0=np.array([0., 0., 0.]),
method='Nelder-Mead')
d[key]['Baserate'] = np.mean(-res.fun / len(y))
else:
num_out = NUM_OUT[m] if NUM_OUT[m] > 1 else 2
d[key]['Baserate'] = get_baserate(y, num_out,
censored=(m in CENSORED_MODELS))
# test and training values
_, lnl_test, lnl_train = extract_best_run(m)
d[key]['Train'] = lnl_train[-1]
d[key]['Test'] = lnl_test[-1]
# likelihood
d[key] = np.exp(d[key])
# roc curve
elem = []
names = {DISCRIM_MODEL: 'Discriminator',
PLACEBO_MODEL: 'Placebo'}
for m in DISCRIM_MODELS:
elem.append(get_roc(model=m).rename(names[m]))
    d['simple_roc'] = pd.concat(elem, axis=1)
import pandas as pd
import ast
import sys
import os.path
sys.path.insert(1,
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import dateutil.parser as parser
from utils.mysql_utils import separator
from utils.io import read_json
from utils.scraping_utils import remove_html_tags
from utils.user_utils import infer_role
from graph.arango_utils import *
import pgeocode
def cast_to_float(v):
try:
return float(v)
except ValueError:
return v
def convert_to_iso8601(text):
date = parser.parse(text)
return date.isoformat()
def load_member_summaries(
source_dir="data_for_graph/members",
filename="company_check",
# concat_uk_sector=False
):
'''
LOAD FLAT FILES OF MEMBER DATA
'''
dfs = []
for membership_level in ("Patron", "Platinum", "Gold", "Silver", "Bronze", "Digital", "Freemium"):
summary_filename = os.path.join(source_dir, membership_level, f"{membership_level}_{filename}.csv")
print ("reading summary from", summary_filename)
dfs.append(pd.read_csv(summary_filename, index_col=0).rename(columns={"database_id": "id"}))
summaries = pd.concat(dfs)
# if concat_uk_sector:
# member_uk_sectors = pd.read_csv(f"{source_dir}/members_to_sector.csv", index_col=0)
# # for col in ("sectors", "divisions", "groups", "classes"):
# # member_uk_sectors[f"UK_{col}"] = member_uk_sectors[f"UK_{col}"].map(ast.literal_eval)
# summaries = summaries.join(member_uk_sectors, on="member_name", how="left")
return summaries
def populate_sectors(
source_dir="data_for_graph",
db=None):
'''
CREATE AND ADD SECTOR(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Sectors", db)
sectors = pd.read_csv(f"{source_dir}/all_sectors.csv", index_col=0)
i = 0
for _, row in sectors.iterrows():
sector_name = row["sector_name"]
print ("creating document for sector", sector_name)
document = {
"_key": str(i),
"name": sector_name,
"sector_name": sector_name,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_commerces(
data_dir="data_for_graph",
db=None):
'''
CREATE AND ADD COMMERCE(AS DEFINED IN MIM DB) NODES TO GRAPH
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Commerces", db)
commerces = pd.read_csv(f"{data_dir}/all_commerces_with_categories.csv", index_col=0)
commerces = commerces.drop_duplicates("commerce_name")
i = 0
for _, row in commerces.iterrows():
commerce = row["commerce_name"]
category = row["commerce_category"]
print ("creating document for commerce", commerce)
document = {
"_key": str(i),
"name": commerce,
"commerce": commerce,
"category": category,
"id": row["id"]
}
insert_document(db, collection, document)
i += 1
def populate_members(
cols_of_interest=[
"id",
"member_name",
"website",
"about_company",
"membership_level",
"tenancies",
"badges",
"accreditations",
"sectors", # add to member as list
"buys",
"sells",
"sic_codes",
"directors",
"Cash_figure",
"NetWorth_figure",
"TotalCurrentAssets_figure",
"TotalCurrentLiabilities_figure",
],
db=None):
'''
CREATE AND POPULATE MEMBER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
    members = load_member_summaries()
members = members[cols_of_interest]
members = members.drop_duplicates("member_name") # ensure no accidental duplicates
members = members.loc[~pd.isnull(members["tenancies"])]
members["about_company"] = members["about_company"].map(remove_html_tags, na_action="ignore")
members = members.sort_values("member_name")
i = 0
for _, row in members.iterrows():
member_name = row["member_name"]
if pd.isnull(member_name):
continue
document = {
"_key" : str(i),
"name": member_name,
**{
k: (row[k].split(separator) if not pd.isnull(row[k]) and k in {"sectors", "buys", "sells"}
else ast.literal_eval(row[k]) if not pd.isnull(row[k]) and k in {
"UK_sectors",
"UK_divisions",
"UK_groups",
"UK_classes",
"sic_codes",
"directors",
}
else cast_to_float(row[k]) if k in {"Cash_figure","NetWorth_figure","TotalCurrentAssets_figure","TotalCurrentLiabilities_figure"}
else row[k] if not pd.isnull(row[k])
else None)
for k in cols_of_interest
},
}
if not pd.isnull(row["directors"]):
directors_ = ast.literal_eval(row["directors"])
directors = []
for director in directors_:
if pd.isnull(director["director_name"]):
continue
if not pd.isnull(director["director_date_of_birth"]):
director["director_date_of_birth"] = insert_space(director["director_date_of_birth"], 3)
directors.append(director)
else:
directors = []
document["directors"] = directors
assert not pd.isnull(row["tenancies"])
tenancies = []
regions = []
for tenancy in row["tenancies"].split(separator):
tenancies.append(tenancy)
if tenancy == "Made in the Midlands":
regions.append("midlands")
else:
assert tenancy == "Made in Yorkshire", tenancy
regions.append("yorkshire")
document["tenancies"] = tenancies
document["regions"] = regions
for award in ("badge", "accreditation"):
award_name = f"{award}s"
if not pd.isnull(row[award_name]):
awards = []
for a in row[award_name].split(separator):
awards.append(a)
document[award_name] = awards
insert_document(db, collection, document)
i += 1
def add_SIC_hierarchy_to_members(db=None):
'''
USE SIC CODES TO MAP TO SECTOR USING FILE:
data/class_to_sector.json
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Members", db, )
get_sic_codes_query = f'''
FOR m IN Members
FILTER m.sic_codes != NULL
RETURN {{
_key: m._key,
sic_codes: m.sic_codes,
}}
'''
members = aql_query(db, get_sic_codes_query)
class_to_sector_map = read_json("data/class_to_sector.json")
for member in members:
sic_codes = member["sic_codes"]
sic_codes = [sic_code.split(" - ")[1]
for sic_code in sic_codes]
classes = set()
groups = set()
divisions = set()
sectors = set()
for sic_code in sic_codes:
if sic_code not in class_to_sector_map:
continue
classes.add(sic_code)
groups.add(class_to_sector_map[sic_code]["group"])
divisions.add(class_to_sector_map[sic_code]["division"])
sectors.add(class_to_sector_map[sic_code]["sector"])
document = {
"_key" : member["_key"],
"UK_classes": sorted(classes),
"UK_groups": sorted(groups),
"UK_divisions": sorted(divisions),
"UK_sectors": sorted(sectors),
}
insert_document(db, collection, document, verbose=True)
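# The contents of data/class_to_sector.json are not shown in this file; based
# on how add_SIC_hierarchy_to_members indexes it, each SIC class code is
# assumed to map to its parent group, division and sector. The codes in this
# example entry are hypothetical.
_EXAMPLE_CLASS_TO_SECTOR_ENTRY = {
    "25.62": {
        "group": "25.6",
        "division": "25",
        "sector": "C",
    },
}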
def populate_users(
data_dir="data_for_graph",
cols_of_interest=[
"id",
"full_name",
"email",
"company_name",
"company_position",
"company_role",
],
db=None):
'''
CREATE AND ADD USER NODES
'''
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Users", db, )
user_filename = f"{data_dir}/all_users.csv"
users = pd.read_csv(user_filename, index_col=0)
users["company_role"] = users.apply(
infer_role,
axis=1
)
i = 0
for _, row in users.iterrows():
user_name = row["full_name"]
if pd.isnull(user_name):
continue
document = {
"_key" : str(i),
"name": user_name,
**{
k: (row[k] if not pd.isnull(row[k]) else None)
for k in cols_of_interest
}
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_user_works_at(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("UserWorksAt", db, className="Edges")
user_filename = f"{data_dir}/all_users.csv"
users = pd.read_csv(user_filename, index_col=0)
users["company_role"] = users.apply(
infer_role,
axis=1
)
member_name_to_id = name_to_id(db, "Members", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in users.iterrows():
user_id = row["id"]
company_id = row["company_id"]
if user_id not in user_name_to_id:
continue
if company_id not in member_name_to_id:
continue
document = {
"_key" : str(i),
"name": "works_at",
"_from": user_name_to_id[user_id],
"_to": member_name_to_id[company_id],
"company_position": row["company_position"]
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_user_follows(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
user_follows_collection = connect_to_collection("UserFollows", db, className="Edges")
user_follows_members_collection = connect_to_collection("MemberMemberFollows", db, className="Edges")
user_follows_filename = os.path.join(data_dir, "all_user_follows.csv")
users = pd.read_csv(user_follows_filename, index_col=0)
member_name_to_id = name_to_id(db, "Members", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in users.iterrows():
user_id = row["id"]
if user_id not in user_name_to_id:
continue
user_name = row["full_name"]
employer_id = row["employer_id"]
followed_member_id = row["followed_member_id"]
if followed_member_id not in member_name_to_id:
continue
# user -> member
document = {
"_key" : str(i),
"name": "follows",
"_from": user_name_to_id[user_id],
"_to": member_name_to_id[followed_member_id]
}
print ("inserting data", document)
insert_document(db, user_follows_collection, document)
# member -> member
if employer_id in member_name_to_id:
document = {
"_key" : str(i),
"name": "follows",
"_from": member_name_to_id[employer_id],
"_to": member_name_to_id[followed_member_id],
"followed_by": user_name,
}
print ("inserting data", document)
insert_document(db, user_follows_members_collection, document)
i += 1
def populate_member_sectors(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("InSector", db, className="Edges")
members = load_member_summaries()
i = 0
member_name_to_id = name_to_id(db, "Members", "id")
sector_name_to_id = name_to_id(db, "Sectors", "sector_name")
for _, row in members.iterrows():
member_id = row["id"]
if member_id not in member_name_to_id:
continue
sectors = row["sectors"]
if pd.isnull(sectors):
continue
sectors = sectors.split(separator)
for sector in sectors:
document = {
"_key" : str(i),
"name": "in_sector",
"_from": member_name_to_id[member_id],
"_to": sector_name_to_id[sector],
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_member_commerces(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("PerformsCommerce", db, className="Edges")
members = load_member_summaries()
i = 0
member_name_to_id = name_to_id(db, "Members", "id")
commerce_name_to_id = name_to_id(db, "Commerces", "commerce")
for _, row in members.iterrows():
member_id = row["id"]
if member_id not in member_name_to_id:
continue
for commerce_type in ("buys", "sells"):
commerce = row[commerce_type]
if not pd.isnull(commerce):
commerce = commerce.split(separator)
for c in commerce:
if c=="":
assert False
continue
document = {
"_key" : str(i),
"name": commerce_type,
"_from": member_name_to_id[member_id],
"_to": commerce_name_to_id[c],
"commerce_type": commerce_type
}
print ("inserting data", document)
insert_document(db, collection, document)
i += 1
def populate_messages(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Messages", db, className="Edges")
message_filename = os.path.join(data_dir, "all_messages.csv")
messages = pd.read_csv(message_filename, index_col=0)
messages = messages.drop_duplicates()
i = 0
user_name_to_id = name_to_id(db, "Users", "id")
for _, row in messages.iterrows():
sender_id = row["sender_id"]
if sender_id not in user_name_to_id:
continue
subject = row["subject"]
message = row["message"]
message = remove_html_tags(message)
timestamp = str(row["created_at"])
# TODO characterise messages
# recipients = json.loads(row["all_recipients"])
# for recipient in recipients:
# receiver = recipient["name"]
receiver_id = row["recipient_id"]
# receiver_member = row["recipient_member_name"]
if receiver_id not in user_name_to_id:
continue
if sender_id == receiver_id:
continue
document = {
"_key": str(i),
"name": "messages",
"_from": user_name_to_id[sender_id],
"_to": user_name_to_id[receiver_id],
"subject": subject,
"message": message,
"sent_at": convert_to_iso8601(timestamp),
}
insert_document(db, collection, document)
i += 1
def populate_member_member_business(
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("MemberMemberBusiness", db, className="Edges")
member_name_to_id = name_to_id(db, "Members", "member_name")
i = 0
# articles
for region in ("yorkshire", "midlands"):
filename = os.path.join("members", f"member_member_partnerships - {region}_matched.csv")
member_member_business = pd.read_csv(filename, index_col=None)
for _, row in member_member_business.iterrows():
member_1 = row["member_1_best_matching_member"]
member_2 = row["member_2_best_matching_member"]
if member_1 not in member_name_to_id:
continue
if member_2 not in member_name_to_id:
continue
article_title = row["article_title"]
document = {
# "_key": sanitise_key(f"{member_1}_{member_2}_article"),
"_key": str(i),
"name": "does_business",
# "_from": f"Members/{sanitise_key(member_1)}",
"_from": member_name_to_id[member_1],
# "_to": f"Members/{sanitise_key(member_2)}",
"_to": member_name_to_id[member_2],
"source": "article",
"article_title": article_title,
"region": region
}
insert_document(db, collection, document)
i += 1
# survey connections
connections_filename="survey/final_processed_connections.csv"
survey_connections = pd.read_csv(connections_filename, index_col=0)
for _, row in survey_connections.iterrows():
member_1 = row["best_matching_member_name"]
member_2 = row["submitted_partner_best_matching_member_name"]
if member_1 not in member_name_to_id:
continue
if member_2 not in member_name_to_id:
continue
document = {
# "_key": sanitise_key(f"{member_1}_{member_2}_survey"),
"_key": str(i),
"name": "does_business",
# "_from": f"Members/{sanitise_key(member_1)}",
"_from": member_name_to_id[member_1],
"_to": f"Members/{sanitise_key(member_2)}",
"_to": member_name_to_id[member_2],
"source": "survey",
}
insert_document(db, collection, document)
i += 1
def populate_events(
data_dir="data_for_graph",
cols_of_interest = [
"id",
"event_name",
"event_type",
"tenants",
"members",
"description",
"status",
"venue",
"starts_at",
"ends_at",
],
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Events", db,)
events_df_filename = os.path.join(data_dir, "all_events.csv")
events_df = pd.read_csv(events_df_filename, index_col=0)
# events_df = events_df.drop_duplicates(["event_name", "starts_at"])
i = 0
for _, row in events_df.iterrows():
event_name = row["event_name"]
document = {
"_key" : str(i),
"name": event_name,
**{
k: (convert_to_iso8601(row[k]) if not pd.isnull(row[k]) and k in ("starts_at", "ends_at", )
else row[k].split(separator) if not pd.isnull(row[k]) and k in ("tenants", "distinct_event_tags", "members")
else row[k] if not pd.isnull(row[k]) else None)
for k in cols_of_interest
}
}
insert_document(db, collection, document)
i += 1
def populate_event_sessions(
data_dir="data_for_graph",
db=None,
):
if db is None:
db = connect_to_mim_database()
event_session_collection = connect_to_collection("EventSessions", db,)
event_to_session_collection = connect_to_collection("EventHasSession", db, className="Edges",)
event_session_filename = os.path.join(data_dir, "all_event_sessions.csv")
event_session_df = pd.read_csv(event_session_filename, index_col=0)
event_name_to_id = name_to_id(db, "Events", "id")
i = 0
for _, row in event_session_df.iterrows():
session_name = row["session_name"]
document = {
"_key" : str(i),
"name": session_name,
**{
k: (row[k] if not pd.isnull(row[k]) else None)
for k in row.index
}
}
insert_document(db, event_session_collection, document)
# event -> session
event_id = row["event_id"]
if event_id in event_name_to_id:
document = {
"_key" : str(i),
"_from": event_name_to_id[event_id],
"_to": f"EventSessions/{i}",
"name": "has_session",
}
insert_document(db, event_to_session_collection, document)
i += 1
def populate_event_attendees(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("EventAttendees", db, className="Edges")
event_attendees_df = pd.read_csv(f"{data_dir}/all_event_attendees.csv", index_col=0)
event_name_to_id = name_to_id(db, "Events", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in event_attendees_df.iterrows():
attendee_id = row["user_id"]
if attendee_id not in user_name_to_id:
continue
event_id = row["event_id"]
if event_id not in event_name_to_id:
continue
document = {
"_key": str(i),
"name": "attends",
"_from": user_name_to_id[attendee_id],
"_to": event_name_to_id[event_id],
"attended": row["attended"],
}
insert_document(db, collection, document)
i += 1
def populate_event_session_attendees(
data_dir="data_for_graph",
db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("EventSessionAttendees", db, className="Edges")
event_attendees_df = pd.read_csv(f"{data_dir}/all_event_session_attendees.csv", index_col=0)
session_name_to_id = name_to_id(db, "EventSessions", "id")
user_name_to_id = name_to_id(db, "Users", "id")
i = 0
for _, row in event_attendees_df.iterrows():
attendee_id = row["user_id"]
if attendee_id not in user_name_to_id:
continue
session_id = row["session_id"]
if session_id not in session_name_to_id:
continue
document = {
"_key": str(i),
"name": "attends_session",
"_from": user_name_to_id[attendee_id],
"_to": session_name_to_id[session_id],
}
insert_document(db, collection, document)
i += 1
# def populate_member_member_sector_connections(
# data_dir="data_for_graph",
# db=None):
# if db is None:
# db = connect_to_mim_database()
# collection = connect_to_collection("MemberMemberSector", db, className="Edges")
# members_in_sector = read_json(f"{data_dir}/member_sectors.json")
# member_name_to_id = name_to_id(db, "Members", "member_name")
# i = 0
# for sector in members_in_sector:
# num_members_in_sector = len(members_in_sector[sector])
# for m1 in range(num_members_in_sector):
# member_1 = members_in_sector[sector][m1]
# if member_1 not in member_name_to_id:
# continue
# for m2 in range(m1+1, num_members_in_sector):
# member_2 = members_in_sector[sector][m2]
# if member_2 not in member_name_to_id:
# continue
# document = {
# # "_key": sanitise_key(f"{attendee_name}_{event_name}_{starts_at}"),
# "_key": str(i),
# "name": f"in_sector_{sector}",
# "_from": member_name_to_id[member_1],
# "_to": member_name_to_id[member_2],
# }
# insert_document(db, collection, document, verbose=True)
# i += 1
# def populate_member_member_commerce_connections(
# data_dir="data_for_graph",
# db=None):
# if db is None:
# db = connect_to_mim_database()
# collection = connect_to_collection("MemberMemberCommerce", db, className="Edges")
# members_in_commerces = read_json(f"{data_dir}/member_commerces(commerces).json")
# member_name_to_id = name_to_id(db, "Members", "member_name")
# i = 0
# for commerce in members_in_commerces:
# sells = members_in_commerces[commerce]["sells"]
# buys = members_in_commerces[commerce]["buys"]
# for member_1, member_2 in product(sells, buys):
# if member_1 not in member_name_to_id:
# continue
# if member_2 not in member_name_to_id:
# continue
# document = {
# # "_key": sanitise_key(f"{attendee_name}_{event_name}_{starts_at}"),
# "_key": str(i),
# "name": f"commerce_{commerce}",
# "_from": member_name_to_id[member_1],
# "_to": member_name_to_id[member_2],
# }
# insert_document(db, collection, document, verbose=True)
# i += 1
# def populate_member_member_event_connections(
# data_dir="data_for_graph",
# db=None):
# if db is None:
# db = connect_to_mim_database()
# collection = connect_to_collection("MemberMemberEvents", db, className="Edges")
# members_at_events = read_json(f"{data_dir}/all_event_attendees_production.json")
# member_name_to_id = name_to_id(db, "Members", "member_name")
# i = 0
# for event in members_at_events:
# attending_members = members_at_events[event]
# for member_1, member_2 in combinations(attending_members, 2):
# if member_1 not in member_name_to_id:
# continue
# if member_2 not in member_name_to_id:
# continue
# document = {
# "_key": str(i),
# "name": f"event_{event}",
# "_from": member_name_to_id[member_1],
# "_to": member_name_to_id[member_2],
# }
# insert_document(db, collection, document, verbose=True)
# i += 1
def populate_uk_sector_hierarchy(db=None):
'''
USE HIERARCHY IN FILE data/SIC_hierarchy.json TO BUILD TREE OF SIC STRUCTURE
'''
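# The loop below assumes uk_sectors is nested four levels deep
# (sector -> division -> group -> class), e.g. roughly
# {"A": {"01": {"01.1": ["01.11", "01.12"]}}}; the identifiers shown here
# are illustrative, only the nesting depth matters.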
if db is None:
db = connect_to_mim_database()
uk_sectors = read_json("data/SIC_hierarchy.json")
classes_collection = connect_to_collection("UKClasses", db)
class_hierarchy_collection = connect_to_collection("UKClassHierarchy", db, className="Edges")
i = 0
j = 0
for sector in uk_sectors:
current_sector_id = i
document = {
"_key": str(i),
"name": f"sector_{sector}",
"type": "sector",
"identifier": sector
}
insert_document(db, classes_collection, document, verbose=True)
i += 1
for division in uk_sectors[sector]:
current_division_id = i
document = {
"_key": str(i),
"name": f"division_{division}",
"type": "division",
"identifier": division
}
insert_document(db, classes_collection, document, verbose=True)
i += 1
# add division to sector edge
document = {
"_key": str(j),
"_from": f"UKClasses/{current_division_id}",
"_to": f"UKClasses/{current_sector_id}",
"name": "InSector"
}
insert_document(db, class_hierarchy_collection, document, verbose=True)
j += 1
for group in uk_sectors[sector][division]:
current_group_id = i
document = {
"_key": str(i),
"name": f"group_{group}",
"type": "group",
"identifier": group
}
insert_document(db, classes_collection, document, verbose=True)
i += 1
# add group to division edge
document = {
"_key": str(j),
"_from": f"UKClasses/{current_group_id}",
"_to": f"UKClasses/{current_division_id}",
"name": "InDivision"
}
insert_document(db, class_hierarchy_collection, document, verbose=True)
j += 1
for c in uk_sectors[sector][division][group]:
current_class_id = i
document = {
"_key": str(i),
"name": f"class_{c}",
"type": "class",
"identifier": c
}
insert_document(db, classes_collection, document, verbose=True)
i += 1
# add class to group edge
document = {
"_key": str(j),
"_from": f"UKClasses/{current_class_id}",
"_to": f"UKClasses/{current_group_id}",
"name": "InGroup"
}
insert_document(db, class_hierarchy_collection, document, verbose=True)
j += 1
def populate_members_to_uk_class(db=None):
if db is None:
db = connect_to_mim_database()
'''
ADD EDGES TO CONNECT MEMBER NODES TO CLASS
'''
collection = connect_to_collection( "MembersToClass", db, className="Edges")
uk_class_to_id = name_to_id(db, "UKClasses", "name")
query = f'''
FOR m IN Members
FILTER m.UK_classes != NULL
FILTER LENGTH(m.UK_classes) > 0
RETURN {{
id: m._id,
UK_classes: m.UK_classes,
UK_groups: m.UK_groups,
UK_divisions: m.UK_divisions,
UK_sectors: m.UK_sectors,
}}
'''
members_to_sector = aql_query(db, query)
i = 0
for member_assignments in members_to_sector:
assignments = {
"sector": member_assignments["UK_sectors"],
"division": member_assignments["UK_divisions"],
"group": member_assignments["UK_groups"],
"class": member_assignments["UK_classes"],
}
# ADD ALL LEVELS
for key in ("class", "group", "division", "sector"):
for c in assignments[key]:
c = f"{key}_{c}" # name is type_identifier
document = {
"_key": str(i),
"_from": member_assignments["id"],
"_to": uk_class_to_id[c],
"name": f"in_{key}",
}
insert_document(db, collection, document, verbose=True)
i += 1
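# Helper used further down to re-insert the space stripped from director
# dates of birth, e.g. insert_space("May1971", 3) -> "May 1971"
# (the input format shown is illustrative).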
def insert_space(string, integer):
return string[0:integer] + ' ' + string[integer:]
def populate_prospects(db=None):
if db is None:
db = connect_to_mim_database()
collection = connect_to_collection("Prospects", db, )
print ("reading postcode to lat-long mapping")
postcode_to_lat_long = pd.read_csv("postcode_to_lat_long.csv", index_col=1)
postcode_to_lat_long = {
postcode: [row["lat"], row["long"]]
for postcode, row in postcode_to_lat_long.iterrows()
}
nomi = pgeocode.Nominatim('gb')
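# Geocoding fallback order used for each prospect below: explicit
# latitude/longitude if present, then the local postcode_to_lat_long CSV
# lookup, and only then a (slower) pgeocode query on the postcode.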
i = 0
'''
ENDOLE
'''
prospects = []
for region in ("yorkshire", "midlands"):
filename = os.path.join("competitors", region, f"{region}_competitors_filtered_by_website.csv",)
region_prospects = pd.read_csv(filename, index_col=0)
prospects.append(region_prospects)
prospects = pd.concat(prospects)
prospects = prospects[~prospects.index.duplicated(keep='first')]
for prospect, row in prospects.iterrows():
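# Build one document per prospect row. Column names listed in the sets
# below are cast to float (financial figures) or parsed with
# ast.literal_eval (list-like strings such as sic_codes); NaN values
# become None; the "directors" column is handled separately afterwards.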
document = {
"_key" : str(i),
"name": prospect,
**{
k.replace(".", "_"): (row[k].split(separator) if not pd.isnull(row[k]) and k in {}
else cast_to_float(row[k]) if not pd.isnull(row[k]) and k in {
"Cash in Bank_figure","Cash in Bank_trend","Cash in Bank_trend_change","Debt Ratio (%)_figure",
"Debt Ratio (%)_trend","Debt Ratio (%)_trend_change","Employees_figure","Employees_trend",
"Employees_trend_change","Net Assets_figure","Net Assets_trend","Net Assets_trend_change","Total Assets_figure",
"Total Assets_trend","Total Assets_trend_change","Total Liabilities_figure","Total Liabilities_trend",
"Total Liabilities_trend_change","Turnover _figure","Turnover _trend","Turnover _trend_change","Year Ended_figure",
}
else ast.literal_eval(row[k]) if not pd.isnull(row[k]) and k in {
"sic_codes",
"relevant_members",
"competitors",
}
else row[k] if not pd.isnull(row[k])
else None)
for k in set(row.index) - {"directors"}
},
}
if not pd.isnull(row["directors"]):
directors_ = ast.literal_eval(row["directors"])
directors = []
for director in directors_:
split = director["name"].split(" • ")
name = split[0]
name = name.replace("Director", "")
name = name.replace("Secretary", "")
age = split[-1]
age = age.replace("Born in ", "")
if age == name:
# age = None
continue
occupation = director["occupation"]
director = {
"director_name": name,
"director_date_of_birth": age,
"director_occupation": occupation,
}
directors.append(director)
else:
directors = []
document["directors"] = directors
document["source"] = "endole"
postcode = row["postcode"]
latitude = None
longitude = None
coordinates = None
if not pd.isnull(latitude) and not pd.isnull(longitude):
coordinates = [latitude, longitude]
elif not pd.isnull(postcode) and postcode in postcode_to_lat_long:
latitude, longitude = postcode_to_lat_long[postcode]
coordinates = [latitude, longitude]
elif not pd.isnull(postcode):
coords = nomi.query_postal_code(postcode)
if not pd.isnull(coords["latitude"]):
latitude = coords["latitude"]
longitude = coords["longitude"]
coordinates = [latitude, longitude]
document["latitude"] = latitude
document["longitude"] = longitude
document["coordinates"] = coordinates
insert_document(db, collection, document, verbose=False)
i += 1
'''
COMPANY CHECK AND BASE
'''
for source in ("companies_house", "base"):
current_prospects = name_to_id(db, "Prospects", "name")
prospects = []
for region in ("yorkshire", "midlands"):
filename = os.path.join(source, region, f"{region}_company_check.csv",)
region_prospects = pd.read_csv(filename, index_col=0)
if "website" not in region_prospects.columns:
websites_filename = os.path.join(source, region, f"{region}_company_websites.csv",)
if os.path.exists(websites_filename):
websites = pd.read_csv(websites_filename, index_col=0)
region_prospects = region_prospects.join(websites, how="inner", )
prospects.append(region_prospects)
prospects = pd.concat(prospects)
prospects = prospects[~prospects.index.duplicated(keep='first')]
prospects = prospects.loc[~prospects.index.isin(current_prospects)]
for prospect, row in prospects.iterrows():
document = {
"_key" : str(i),
"name": prospect,
**{
k.replace(".", "_"): (row[k].split(separator) if not pd.isnull(row[k]) and k in {}
else cast_to_float(row[k]) if not pd.isnull(row[k]) and k in {
"Cash_figure",
"NetWorth_figure",
"TotalCurrentAssets_figure",
"TotalCurrentLiabilities_figure",
}
else ast.literal_eval(row[k]) if not pd.isnull(row[k]) and k in {
"sic_codes",
}
else row[k] if not pd.isnull(row[k])
else None)
for k in set(row.index) - {"directors"}
},
}
if not pd.isnull(row["directors"]):
directors_ = ast.literal_eval(row["directors"])
directors = []
for director in directors_:
if pd.isnull(director["director_name"]):
continue
if not pd.isnull(director["director_date_of_birth"]):
director["director_date_of_birth"] = insert_space(director["director_date_of_birth"], 3)
directors.append(director)
else:
directors = []
document["directors"] = directors
document["source"] = source
postcode = document["postcode"]
latitude = None
longitude = None
coordinates = None
if not pd.isnull(latitude) and not pd.isnull(longitude):
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
import numpy as np
import pytest
import pandas as pd
from pandas import Series, compat
from pandas.core.indexes.period import IncompatibleFrequency
import pandas.util.testing as tm
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic(object):
@pytest.mark.parametrize(
'ts',
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(),
lambda x: tm.makeFloatSeries(),
True)
])
@pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv',
'truediv', 'div', 'pow'])
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename('ts')
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
if opname == 'div' and compat.PY3:
pytest.skip('div test only for Py3')
op = getattr(Series, opname)
if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
class TestSeriesArithmetic(object):
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_empty_series_add_sub(self):
# GH#13844
a = Series(dtype='M8[ns]')
b = Series(dtype='m8[ns]')
tm.assert_series_equal(a, a + b)
tm.assert_series_equal(a, a - b)
tm.assert_series_equal(a, b + a)
with pytest.raises(TypeError):
b - a
def test_add_series_with_period_index(self):
rng = pd.period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with tm.assert_raises_regex(IncompatibleFrequency, msg):
ts + ts.asfreq('D', how="end")
def test_operators_datetimelike(self):
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
# ## datetime64 ###
dt1 = Series([pd.Timestamp('20111230'), pd.Timestamp('20120101'),
pd.Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([pd.Timestamp('20111231'), pd.Timestamp('20120102'),
pd.Timestamp('20120104')])
dt1 - dt2
dt2 - dt1
# ## datetime64 with timetimedelta ###
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timetimedelta with datetime64 ###
td1 + dt1
dt1 + td1
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison(object):
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left <= right)
tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left <= right)
tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assert_raises_regex(ValueError, msg):
getattr(left, op)(right, axis=1)
class TestSeriesComparison(object):
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
with pytest.raises(ValueError):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
#!/usr/bin/env python3
import re
import csv
import xlsxwriter
import pandas as pd
SR= open("shortreadExactMatchTranscripts.txt")
file1=SR.readlines()
# To count the number of lines in the file
countFile1=0
for i in file1:
if i.strip():
countFile1 +=1
print("Number of lines in file1: ")
print(countFile1)
LR= open("directExactMatchTranscripts.txt")
file2=LR.readlines()
# To count the number of lines in the file
countFile2=0
for i in file2:
if i.strip():
countFile2 +=1
print("Number of lines in file2: ")
print(countFile2)
M=open("mergedDirectExactMatchTranscripts.txt")
file3= M.readlines()
# To count the number of lines in the file
countFile3=0
for i in file3:
if i.strip():
countFile3 +=1
print("Number of lines in file3: ")
print(countFile3)
# Matches column 2
regexString='\t[A-Za-z,0-9,\\.\\-]{1,100}[|]rna[0-9]{1,100}'
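# A second-column value such as "\tNC000913.3|rna42" would match this
# pattern (the accession shown is illustrative; real IDs may differ slightly).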
#matches 1st and 2nd column
# regexString= 'TCONS_[0-9]{1,9}\tXLOC_[0-9]{1,9}\t[A-Z,0-9,\\.\\-]{3,100}[|]rna[0-9]{1,6}'
# regexString= "TCONS_[0-9]*\tXLOC_[0-9]{1,9}\t\'?\w+([-']\w+)*[|]rna[0-9]{1,6}"
# Counting matching Regex. Should be same as number of lines in file 1.
shortReadCount=0
for i in file1:
temp=re.findall(regexString,i)
if(temp):
# print(temp)
shortReadCount +=1
print("Shortread Count")
print(shortReadCount)
# Counting matching Regex. Should be same as number of lines in file 2.
longReadCount=0
for i in file2:
temp=re.findall(regexString,i)
#print(temp)
if(temp):
longReadCount +=1
print("Longread count")
print(longReadCount)
# Counting matching Regex. Should be same as number of lines in file 3.
mergedCount=0
for i in file3:
temp=re.findall(regexString,i)
if(temp):
#print(temp)
mergedCount +=1
print("Merged count")
print(mergedCount)
shortReadCount=0
# Creating a list to store the matched Regex
finalTranscriptId = []
# If regex is matched, it is added in the list
for i in file1:
temp=re.findall(regexString,i)
if(temp):
# print(temp[0])
finalTranscriptId.append(temp[0])
print("Length of final after Shortreads :")
print(len(finalTranscriptId))
for i in file2:
temp=re.findall(regexString,i)
if(temp):
# print(temp[0])
finalTranscriptId.append(temp[0])
print("Length of final after longreads :")
print(len(finalTranscriptId))
for i in file3:
temp=re.findall(regexString,i)
if(temp):
# print(temp[0])
finalTranscriptId.append(temp[0])
print("Length of final after Merged reads :")
print(len(finalTranscriptId))
#Another method to remove duplicates
# res=[]
# for i in finalTranscriptId:
# if i not in res:
# res.append(i)
# # printing list after removal
# print ("The list after removing duplicates : ")
# print(len(res))
# Removing Duplicates from final Transcript ID Column list
finalTranscriptId = list(dict.fromkeys(finalTranscriptId))
print("After removing duplicates:")
print(len(finalTranscriptId))
#print(finalTranscriptId)
finalDictionary = []
# Iterating through dictionary
for indexFinal in finalTranscriptId:
tempDictionary = {}
tempDictionary['Transcript'] = indexFinal
# print(indexFinal)
for indexFile1 in file1:
if indexFinal in indexFile1:
tempDictionary['File 1'] = 'Yes'
# print("Found in File 1")
for indexFile2 in file2:
if indexFinal in indexFile2:
tempDictionary['File 2'] = 'Yes'
# print("Found in File 2")
for indexFile3 in file3:
if indexFinal in indexFile3:
tempDictionary['File 3'] = 'Yes'
# print("Found in File 3")
#print(tempDictionary)
finalDictionary.append(tempDictionary)
#print(finalDictionary)
#print(len(finalDictionary))
# for j in range(len(file2)):
# if temp[0] in file2[j]:
# shortReadCount +=1
#print("Shortread Count")
#print(shortReadCount)
print("Writing to File")
finalDictionary = pd.DataFrame(finalDictionary)
"""Main module."""
import json
from collections import defaultdict
import numpy as np
import pandas as pd
from copy import deepcopy
from math import nan, isnan
from .constants import IMAGING_PARAMS
DIRECT_IMAGING_PARAMS = IMAGING_PARAMS - set(["NSliceTimes"])
def check_merging_operations(action_csv, raise_on_error=False):
"""Checks that the merges in an action csv are possible.
To be mergeable, the source and destination parameter groups must have the
same NSliceTimes, matching fieldmap (IntendedForKey/FieldmapKey) columns,
and no conflicting non-NaN imaging parameter values.
"""
actions = pd.read_csv(action_csv)
ok_merges = []
deletions = []
overwrite_merges = []
sdc_incompatible = []
sdc_cols = set([col for col in actions.columns if
col.startswith("IntendedForKey") or
col.startswith("FieldmapKey")])
def _check_sdc_cols(meta1, meta2):
return {key: meta1[key] for key in sdc_cols} == \
{key: meta2[key] for key in sdc_cols}
needs_merge = actions[np.isfinite(actions['MergeInto'])]
for _, row_needs_merge in needs_merge.iterrows():
source_param_key = tuple(row_needs_merge[["MergeInto", "KeyGroup"]])
dest_param_key = tuple(row_needs_merge[["ParamGroup", "KeyGroup"]])
dest_metadata = row_needs_merge.to_dict()
source_row = actions.loc[
(actions[["ParamGroup", "KeyGroup"]] == source_param_key).all(1)]
if source_param_key[0] == 0:
print("going to delete ", dest_param_key)
deletions.append(dest_param_key)
continue
if not source_row.shape[0] == 1:
raise Exception("Could not identify a unique source group")
source_metadata = source_row.iloc[0].to_dict()
merge_id = (source_param_key, dest_param_key)
# Check for compatible fieldmaps
if not _check_sdc_cols(source_metadata, dest_metadata):
sdc_incompatible.append(merge_id)
continue
if not merge_without_overwrite(source_metadata, dest_metadata,
raise_on_error=raise_on_error):
overwrite_merges.append(merge_id)
continue
# add to the list of ok merges if there are no conflicts
ok_merges.append(merge_id)
error_message = "\n\nProblems were found in the requested merge.\n" \
"===========================================\n\n"
if sdc_incompatible:
error_message += "Some merges are incompatible due to differing " \
"distortion correction strategies. Check that " \
"fieldmaps exist and have the correct " \
"\"IntendedFor\" in their sidecars. These merges " \
"could not be completed:\n"
error_message += print_merges(sdc_incompatible) + "\n\n"
if overwrite_merges:
error_message += "Some merges are incompatible because the metadata " \
"in the destination json conflicts with the values " \
"in the source json. Merging should only be used " \
"to fill in missing metadata. The following " \
"merges could not be completed:\n\n"
error_message += print_merges(overwrite_merges)
if overwrite_merges or sdc_incompatible:
if raise_on_error:
raise Exception(error_message)
print(error_message)
return ok_merges, deletions
def merge_without_overwrite(source_meta, dest_meta_orig, raise_on_error=False):
"""Performs a safe metadata copy.
Here, "safe" means that no non-NaN values in `dest_meta` are
overwritten by the merge. If any overwrites occur an empty
dictionary is returned.
"""
# copy the original json params
dest_meta = deepcopy(dest_meta_orig)
if not source_meta.get("NSliceTimes") == dest_meta.get("NSliceTimes"):
if raise_on_error:
raise Exception("Value for NSliceTimes is %d in destination "
"but %d in source"
% (dest_meta.get("NSliceTimes"),
source_meta.get("NSliceTimes")))
return {}
for parameter in DIRECT_IMAGING_PARAMS:
source_value = source_meta.get(parameter, nan)
dest_value = dest_meta.get(parameter, nan)
# cannot merge num --> num
# exception should only be raised
# IF someone tries to replace a num (dest)
# with a num (src)
if not is_nan(source_value):
# need to figure out if we can merge
if not is_nan(dest_value) and source_value != dest_value:
if raise_on_error:
raise Exception("Value for %s is %s in destination "
"but %s in source"
% (parameter, str(dest_value),
str(source_value)))
return {}
dest_meta[parameter] = source_value
return dest_meta
def is_nan(val):
'''Returns True if val is nan'''
if not isinstance(val, float):
return False
return isnan(val)
def print_merges(merge_list):
"""Print formatted text of merges"""
return "\n\t" + "\n\t".join(
["%s \n\t\t-> %s" % ("%s:%d" % src_id[::-1],
"%s:%d" % dest_id[::-1]) for
src_id, dest_id in merge_list])
def merge_json_into_json(from_file, to_file,
raise_on_error=False):
print("Merging imaging metadata from %s to %s"
% (from_file, to_file))
with open(from_file, "r") as fromf:
source_metadata = json.load(fromf)
with open(to_file, "r") as tof:
dest_metadata = json.load(tof)
orig_dest_metadata = deepcopy(dest_metadata)
merged_metadata = merge_without_overwrite(
source_metadata, dest_metadata, raise_on_error=raise_on_error)
if not merged_metadata:
return 255
# Only write if the data has changed
if not merged_metadata == orig_dest_metadata:
print("OVERWRITING", to_file)
with open(to_file, "w") as tofw:
json.dump(merged_metadata, tofw, indent=4)
return 0
def group_by_acquisition_sets(files_csv, output_prefix, acq_group_level):
'''Finds unique sets of Key/Param groups across subjects.
'''
from bids.layout import parse_file_entities
from bids import config
config.set_option('extension_initial_dot', True)
files_df = pd.read_csv(files_csv)
from copy import deepcopy
import requests
import os
import bs4
from openpyxl import load_workbook
import pandas as pd
from ..helpers.db_funcs import get_ep_id_by_number, get_season_id_by_number_type
from ..helpers.extract_helpers import search_for_new_seasons
import glob
import re
import numpy as np
DOCS_URL_TEMPLATE = 'https://docs.google.com/spreadsheets/d/{id}/export?format=xlsx&id={id}'
SURVIVOR_SOURCE = 'https://www.truedorktimes.com/survivor/boxscores/data.htm'
def create_data_dict(subset=None):
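# Scrapes the True Dork Times boxscore index and returns roughly
# {"s41_...": "<google-sheets-id>", ...}: keys are lower-cased season titles
# with spaces/hyphens turned into underscores and punctuation stripped,
# values are the spreadsheet ids passed to save_survivor_excel.
# (The key spelling shown is illustrative.)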
ret_dict = {}
sp = bs4.BeautifulSoup(requests.get(SURVIVOR_SOURCE).content)
cast_elements = sp.find_all('ul', attrs={'class': 'cast'})
for e in cast_elements:
attrs = e.find('a').attrs
try:
if 'spreadsheet' in attrs['href']:
v = attrs['href'][:-1].split('/')[-1]
k = str(e.text.lower())
for p in '- ':
k = k.replace(p, '_')
for p in ':.-,':
k = k.replace(p, '')
k = k.replace('\n', '')[1:]
if subset:
if k.split('_')[0] not in subset:
continue
ret_dict[k] = v
else:
pass
except KeyError:
pass
return ret_dict
def save_survivor_excel(sheets_id, readable_name, dest_folder='../data/raw'):
url = DOCS_URL_TEMPLATE.format(**dict(id=sheets_id))
f_name = '{readable_name}.xlsx'.format(readable_name=readable_name)
req = requests.get(url)
with open(os.path.join(dest_folder, f_name), 'wb') as f:
f.write(req.content)
req.close()
def pull_and_save_excels(data_dict=None, subset=None, dest_folder='../data/raw'):
if not data_dict:
data_dict = create_data_dict(subset=subset)
for k, v in data_dict.items():
save_survivor_excel(v, k, dest_folder=dest_folder)
# Above is for the actual excels...
def empty_cond(ws, cell, *args, **kwargs):
return not cell.value
def vertical_cond_tc(ws, cell, *args, **kwargs):
return empty_cond(ws, cell) or (cell.value == 'wanda')
def any_cond(ws, cell):
return False
def rc_horizontal_cond(ws, cell, col_names, *args, **kwargs):
above = ws.cell(row=cell.row - 1, column=cell.column).value
if isinstance(above, str):
ic_bool = ('IC' in above) or (
'Immunity challenge' in above) or ('RC' in above)
ic_bool = ic_bool and (len(col_names) != 0)
else:
ic_bool = False
return (not cell.value) or (ic_bool)
def ep_horizontal_cond(ws, cell, col_names, nblanks=5, *args, **kwargs):
add_numbers = [x for x in range(1, nblanks + 1)]
right_two = [not ws.cell(
row=cell.row, column=cell.column + i).value for i in add_numbers]
return all(right_two) and not cell.value
def normal_extract_values(ws, row, column_start, width, *args, **kwargs):
return pd.Series([ws.cell(row=row, column=column_start + i + 1).value for i in range(width)])
def vote_extract_values(ws, row, column_start, width, col_names, *args, **kwargs):
values = normal_extract_values(ws, row, column_start, width)
values = pd.Series([c for i, c in enumerate(
col_names) if pd.notnull(values[i])])
return values if len(values) > 0 else pd.Series([None])
def identity_pp(df, col_names, *args, **kwargs):
df.columns = col_names
df = df.loc[:, ~df.columns.duplicated()]
return df
def ep_pp(df, col_names, *args, **kwargs):
df = identity_pp(df, col_names, *args, **kwargs)
df = df[df.columns[~df.columns.isna()]]
return df
def vote_pp(df, col_names, *args, **kwargs):
if df.shape[1] > 1:
df = pd.DataFrame(pd.concat([df[col] for col in df]))
df.columns = ['voted_for']
df['vote_counted'] = ~df['voted_for'].isna()
df = df.loc[:, ~df.columns.duplicated()]
return df
def extract_subtable(ws, start_row, start_column, index_column=None, horizontal_condition=None,
vertical_condition=None, extract_values=None, postprocess=None):
if not horizontal_condition:
horizontal_condition = empty_cond
if not vertical_condition:
vertical_condition = empty_cond
if not extract_values:
extract_values = normal_extract_values
if not postprocess:
postprocess = identity_pp
row = start_row
col = start_column
col_names = []
rows = []
idx = []
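# Two-pass scan: first walk right from the start cell collecting column
# names until horizontal_condition fires, then walk down the index column
# extracting one row of values at a time until vertical_condition fires.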
while True:
cell = ws.cell(row=row, column=col)
v = cell.value
if horizontal_condition(ws, cell, col_names):
break
else:
col_names.append(v)
col += 1
n_voted_against = len(col_names)
col -= (n_voted_against + 1)
row += 1
while True:
idx_cell = ws.cell(
row=row, column=index_column if index_column else col)
if vertical_condition(ws, idx_cell):
break
else:
values = extract_values(ws, row, col, n_voted_against, col_names)
rows.append(values)
idx.append(idx_cell.value)
row += 1
df = pd.DataFrame(rows)
df.index = idx
df.index.name = 'contestant'
df = postprocess(df, col_names)
df.reset_index(inplace=True)
return df
def extract_rc_challenge(ws, c_row, c_column):
return extract_subtable(ws, c_row + 1, c_column, horizontal_condition=rc_horizontal_cond, index_column=1)
def extract_ic_challenge(ws, c_row, c_column):
return extract_subtable(ws, c_row + 1, c_column, horizontal_condition=rc_horizontal_cond, index_column=1)
def extract_tc(ws, c_row, c_column):
return extract_subtable(ws, c_row + 1, c_column, vertical_condition=vertical_cond_tc, extract_values=vote_extract_values, postprocess=vote_pp, index_column=1)
def extract_ep(ws, c_row, c_column):
return extract_subtable(ws, c_row, c_column, index_column=1, horizontal_condition=ep_horizontal_cond, postprocess=ep_pp)
def append_tc_data(df, ws, cell):
v = cell.value
try:
total_players = int(re.search('F(\d+)', v).group(1))
except AttributeError:
if 'No' in v:
return pd.DataFrame()
elif 'Morgan' in v:
total_players = 10
elif 'Drake' in v:
total_players = 9
elif 'Mokuta' in v:
total_players = 18
elif 'Vakama' in v:
total_players = 17
else:
raise
episode = ws.title[1:]
new_df = extract_tc(ws, cell.row, cell.column)
new_df['total_players_remaining'] = total_players
new_df['episode'] = int(re.match('(\d+).*', episode).group(1))
df = pd.concat([df, new_df], ignore_index=True)
return df
def append_challenge_data(df, ws, cell, challenge_type):
search = re.search('F(\d+)', cell.value)
if not search:
# We don't have information about the "final" amount, so we don't fill this in
final = None
else:
final = int(search.group(1))
if challenge_type == 'RC':
extract_f = extract_rc_challenge
elif challenge_type == 'IC':
extract_f = extract_ic_challenge
else:
raise ValueError
episode = ws.title[1:]
new_df = extract_f(ws, cell.row, cell.column)
new_df['total_players_remaining'] = final
new_df['episode'] = int(re.match('(\d+).*', episode).group(1))
try:
df = pd.concat([df, new_df], ignore_index=True)
except:
import pdb
pdb.set_trace()
return df
def append_episode_data(df, ws, cell):
episode = ws.title[1:]
new_df = extract_ep(ws, cell.row, cell.column)
new_df['episode'] = int(re.match('(\d+).*', episode).group(1))
df = pd.concat([df, new_df], ignore_index=True)
return df
def append_rc_data(df, ws, cell):
return append_challenge_data(df, ws, cell, 'RC')
def append_ic_data(df, ws, cell):
return append_challenge_data(df, ws, cell, 'IC')
def ic_transform(df, full_name_dict_to_id):
df['win'] = df['win?']
if 1 in df:
df['win'] = df['win?'].fillna(df[1])
if 'win? ' in df:
df['win'] = df['win'].fillna(df['win? '])
if 'sitout' in df:
df['sitout'] = df['SO'].fillna('sitout')
else:
df['sitout'] = df['SO']
merge_key = df['contestant'].str.replace(
' ', '_') + '_' + df['season_id'].astype(str)
df['contestant_id'] = merge_key.map(full_name_dict_to_id)
rel_columns = df.columns[df.columns.isin(
['win?', 'contestant', 'SO', 'win? '])]
df.rename(columns={'#people': 'team', 'win%': 'win_pct',
'total win%': 'episode_win_pct'}, inplace=True)
df.drop(columns=rel_columns, inplace=True)
df = df[df['win'].notnull()].reset_index(drop=True)
return df
def extract_episodal_data(ws):
results = {'tribal_council': pd.DataFrame()
import sklearn.cluster
from scipy.stats import zscore
from matplotlib.patches import Patch
import gseapy as gp
import numpy as np
import pandas as pd
import sys
import scanpy as sc
def get_genelist_references(reference_file_path = "../../Data/",gene_sets=["GO_Biological_Process_2021"]):
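# Each reference file is assumed to be tab-separated with one gene set per
# line: the first field is the set name and every remaining field is treated
# as a member gene (a .gmt-like layout without a description column).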
genelist_references = {}
for s in gene_sets:
genelist_references[s] = {}
genelist_reference_file = open(reference_file_path+s+".txt")
for l in genelist_reference_file:
m = l.split("\t")
genelist_references[s][m[0]] = m[1:]
return genelist_references
def make_ordered_exp(epi_celltype_exp,celltypes,metadata,adata,celltype_col="celltype",lognorm=True,filter_expression=True):
if type(celltypes)!=list:
celltypes = [celltypes]
exp = epi_celltype_exp[epi_celltype_exp[celltype_col].isin(celltypes)]
exp.index=exp["sample"]
exp = exp.iloc[:,2:]
#exp =exp.dropna()
# map expression to time post partum metadata
exp["time_post_partum_days"] = exp.index.map(metadata["time_post_partum_days"])
exp = exp.loc[exp["time_post_partum_days"]<400]
exp = exp.iloc[:,:-1]
exp=exp.loc[adata.obs[adata.obs["Epithelial Cell Subclusters"].isin(celltypes)].groupby(["sample"]).count().loc[exp.index,"phase"] > 10]
# remove genes not expressed
exp=exp.loc[:,exp.sum(axis=0)>0]
if lognorm:
#sample normalize
exp_norm = exp.div(exp.sum(axis=1),axis=0)*1000
# log
#exp_log=np.log(exp+1)
exp_lognorm = np.log(exp_norm+1)
#order exp by time post partum
else:
exp_lognorm = exp
ordered_exp = exp_lognorm.iloc[exp_lognorm.index.map(metadata["time_post_partum_days"]).argsort()]
return exp,ordered_exp
def heatmap_and_clusters_by_time(epi_celltype_exp, des_res, celltype,metadata,adata,minlfc=0.005,minmean=20,vmax=3,vmin=-2, min_pts = .1):
directory = "time_series_heatmaps/"
exp,ordered_exp = make_ordered_exp(epi_celltype_exp, celltype,metadata,adata)
if "rank_genes_groups" not in adata_all_epi.uns or adata_all_epi.uns["rank_genes_groups"]["params"]["groupby"] != "Epithelial Cell Subclusters" or "pts" not in adata_all_epi.uns["rank_genes_groups"]:
sc.tl.rank_genes_groups(adata_all_epi, groupby="Epithelial Cell Subclusters", pts=True)
des_res_reduced = des_res.loc[des_res["padj"]<.05]
des_res_reduced = des_res_reduced.loc[des_res_reduced["log2FoldChange"].abs()>minlfc]
des_res_reduced = des_res_reduced.loc[des_res_reduced["baseMean"].abs()>minmean]
#g = [i.replace(".","_") for i in des_res_reduced.index]
overlap_genes = list(set(des_res_reduced.index).intersection(set(adata.uns["rank_genes_groups"]["pts"].index)))
#des_res_reduced.index = [i.replace(".","_") for i in des_res_reduced.index]
des_res_reduced = des_res_reduced.loc[overlap_genes]
des_res_reduced["pts"] = adata_all_epi.uns["rank_genes_groups"]["pts"].loc[des_res_reduced.index,celltype]
des_res_reduced = des_res_reduced.loc[des_res_reduced["pts"]>min_pts]
genes=[i for i in des_res_reduced.sort_values('log2FoldChange').index if i in ordered_exp.columns]
#zscore each column
z=ordered_exp.apply(zscore)
n_clusters = 5
labels = sklearn.cluster.KMeans(n_clusters=n_clusters).fit_predict(z.T.loc[genes])
new_gene_order=reorder_from_labels(labels,genes)
lut=dict(zip(list(set(labels)),("r","g","y","m","k")))
row_colors=[lut[i] for i in labels]
row_color_order = reorder_from_labels(labels,row_colors)
exp.iloc[exp.index.map(metadata["time_post_partum_days"]).argsort()][new_gene_order].to_csv(directory+celltype+"_reduced_pseudobulk_expression_for_heatmap_raw.csv")
col_colors=ordered_exp.index.map(metadata["milk_stage"]).map(hh.milk_stage_colors)
ordered_exp[new_gene_order].to_csv(directory+celltype+"_reduced_pseudobulk_expression_for_heatmap_lognormed.csv")
pd.DataFrame(labels, index=genes)
#!/usr/bin/env python3
from asyncore import loop
import math
import datetime
import argparse
from pkgutil import get_data
import shutil
from webbrowser import get
from numpy import fft
import urllib3
import requests
import re
import os
import pandas as pd
from alive_progress import alive_bar
from bs4 import BeautifulSoup
from colorama import Fore, Style
from datetime import date
from geographiclib.geodesic import Geodesic
work_dir = os.getcwd()
# pandas init
dfColumns = ['name', 'callsign', 'frequency', 'boundary', 'upper_fl', 'lower_fl', 'class']
df_fir = pd.DataFrame(columns=dfColumns)
df_uir = pd.DataFrame(columns=dfColumns)
df_cta = pd.DataFrame(columns=dfColumns)
df_tma = pd.DataFrame(columns=dfColumns)
df_ctr = pd.DataFrame(columns=dfColumns)
df_atz = pd.DataFrame(columns=dfColumns)
dfEnr05 = pd.DataFrame(columns=dfColumns)
class Airac:
"""Class for general functions relating to AIRAC"""
def __init__(self):
"""First AIRAC date following the last cycle length modification"""
startDate = "2019-01-02"
self.baseDate = date.fromisoformat(str(startDate))
# Length of one AIRAC cycle
self.cycleDays = 28
def initialise(self, dateIn=0):
"""Calculate the number of AIRAC cycles between any given date and the start date"""
if dateIn:
inputDate = date.fromisoformat(str(dateIn))
else:
inputDate = date.today()
# How many AIRAC cycles have occured since the start date
diffCycles = (inputDate - self.baseDate) / datetime.timedelta(days=1)
# Round that number down to the nearest whole integer
numberOfCycles = math.floor(diffCycles / self.cycleDays)
return numberOfCycles
def currentCycle(self):
"""Return the date of the current AIRAC cycle"""
numberOfCycles = self.initialise()
numberOfDays = numberOfCycles * self.cycleDays + 1
return self.baseDate + datetime.timedelta(days=numberOfDays)
def nextCycle(self):
"""Return the date of the next AIRAC cycle"""
numberOfCycles = self.initialise()
numberOfDays = (numberOfCycles + 1) * self.cycleDays + 1
return self.baseDate + datetime.timedelta(days=numberOfDays)
def url(self, next=0):
"""Return a generated URL based on the AIRAC cycle start date"""
baseUrl = "https://www.aurora.nats.co.uk/htmlAIP/Publications/"
if next:
baseDate = self.nextCycle() # if the 'next' variable is passed, generate a URL for the next AIRAC cycle
else:
baseDate = self.currentCycle()
basePostString = "-AIRAC/html/eAIP/"
return baseUrl + str(baseDate) + basePostString
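# Example usage (illustrative):
# cycle = Airac()
# cycle.currentCycle() # date the active AIRAC cycle started
# cycle.url() # eAIP base URL built from that cycle date
# cycle.url(next=1) # same URL, but for the next cycle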
class Webscrape:
'''Class to scrape data from the given AIRAC eAIP URL'''
def __init__(self, next=0):
cycle = Airac()
self.cycle = cycle.currentCycle()
self.cycleUrl = cycle.url()
self.country = "EG"
def get_table_soup(self, uri):
"""Parse the given table into a beautifulsoup object"""
address = self.cycleUrl + uri
http = urllib3.PoolManager()
error = http.request("GET", address)
if (error.status == 404):
return 404
page = requests.get(address)
return BeautifulSoup(page.content, "lxml")
def cw_acw_helper(self, data_in, output_title, load=0):
"""creates a list of complex airspace areas with the direction of the arc for reference later on"""
dfColumns = ['area', 'number', 'direction']
complex_areas = pd.DataFrame(columns=dfColumns)
row = 0
complex_search_data = data_in.find_all("p") # find everything enclosed in <p></p> tags
complex_len = len(complex_search_data)
while row < complex_len:
title = re.search(r"id=\"ID_[\d]{8,10}\"\>([A-Z]*)\s(FIR|CTA|TMA|CTR)\s([0-9]{0,2})\<", str(complex_search_data[row]))
if title:
print_title = f"{str(title.group(1))} {str(title.group(2))} {str(title.group(3))}"
direction = re.findall(r"(?<=\s)(anti-clockwise|clockwise)(?=\s)", str(complex_search_data[row+1]))
if direction:
area_number = 0
for d in direction:
ca_out = {'area': print_title, 'number': str(area_number), 'direction': str(d)}
complex_areas = complex_areas.append(ca_out, ignore_index=True)
area_number += 1
row += 1
row += 1
if load == 1:
return complex_areas
else:
complex_areas.to_csv(f'{work_dir}\\DataFrames\{output_title}-CW-ACW-Helper.csv')
def circle_helper(self, data_in, output_title, load=0):
"""creates a list of complex airspace areas with the direction of the arc for reference later on"""
dfColumns = ['area', 'number', 'direction']
complex_areas = pd.DataFrame(columns=dfColumns)
row = 0
complex_search_data = data_in.find_all("p") # find everything enclosed in <p></p> tags
complex_len = len(complex_search_data)
while row < complex_len:
title = re.search(r"id=\"ID_[\d]{8,10}\"\>([A-Z]*)\s(ATZ)\<", str(complex_search_data[row]))
if title:
print_title = f"{str(title.group(1))} {str(title.group(2))}"
circle = re.findall(r"(?<=\s)(circle)(?=\,|\s)", str(complex_search_data[row+1]))
if circle:
ca_out = {'area': print_title, 'number': "0", 'direction': "circle"}
complex_areas = complex_areas.append(ca_out, ignore_index=True)
row += 1
row += 1
if load == 1:
return complex_areas
else:
complex_areas.to_csv(f'{work_dir}\\DataFrames\{output_title}-Circle-Helper.csv')
def airspace_parser(self, getData, ad217=0, df_fir=df_fir, df_uir=df_uir, df_cta=df_cta, df_tma=df_tma, df_ctr=df_ctr, df_atz=df_atz, df_danger=dfEnr05):
"""parse the airspace data from the given page"""
# scrape all the data and chuck it in an array
data_out = []
searchData = getData.find_all("tr")
for line in searchData:
for l in line.stripped_strings:
data_out.append(l)
# define some bits
stopstopstop = 0
df_atz_out = False
airspace = False
danger = False
row = 0
last_arc_title = False
arc_counter = 0
space = []
loop_coord = False
first_callsign = False
first_freq = False
frequency = "000.000"
upper_limit_out = "000"
lower_limit_out = "000"
airspace_class_out = "E"
count = 0
# actually do something with the data
while (count < len(data_out) and (stopstopstop == 0)):
data_to_wrangle = data_out[count]
title = re.search(r"TAIRSPACE;TXT_NAME", str(data_to_wrangle))
danger_area = re.search(r"TAIRSPACE;CODE_ID", str(data_to_wrangle))
coords = re.search(r"(?:TAIRSPACE_VERTEX;GEO_L(?:AT|ONG);)([\d]{4})", str(data_to_wrangle))
callsign = re.search(r"TUNIT;TXT_NAME", str(data_to_wrangle))
freq = re.search(r"TFREQUENCY;VAL_FREQ_TRANS", str(data_to_wrangle))
arc = re.search(r"TAIRSPACE_VERTEX;VAL_RADIUS_ARC", str(data_to_wrangle))
lat_arc = re.search(r"TAIRSPACE_VERTEX;GEO_LAT_ARC", str(data_to_wrangle))
lon_arc = re.search(r"TAIRSPACE_VERTEX;GEO_LONG_ARC", str(data_to_wrangle))
airspace_class = re.search(r"TAIRSPACE_LAYER_CLASS;CODE_CLASS", str(data_to_wrangle))
upper_limit = re.search(r"TAIRSPACE_VOLUME;VAL_DIST_VER_UPPER", str(data_to_wrangle))
lower_limit = re.search(r"TAIRSPACE_VOLUME;VAL_DIST_VER_LOWER", str(data_to_wrangle))
if title:
# get the printed title
print_title = str(data_out[count-1])
airspace = re.search(r"(FIR|UIR|CTA|TMA|CTR|ATZ|RMZ)", str(data_out[row-1]))
if airspace:
df_in_title = print_title
loop_coord = True
if danger_area and (stopstopstop == 0):
# get the danger area code
dac_d_p_r = re.search(r"(EG\s[D|P|R]{1}[\d]{3}[A-Z]*)", str(data_out[row-1]))
print(dac_d_p_r)
dac_ru = re.search(r"(EG\sRU[\d]{3}[A-Z]*)", str(data_out[row-1]))
print(dac_ru)
if dac_ru:
stopstopstop = 1
if dac_d_p_r:
airspace = dac_d_p_r
danger = True
danger_title = str(data_out[count+1])
loop_coord = True
if (callsign) and (first_callsign is False):
# get the first (and only the first) printed callsign
callsign_out = str(data_out[count-1])
first_callsign = True
if airspace_class:
# get airspace class
airspace_class_out = str(data_out[count-1])
if upper_limit:
# get airspace upper limit
upper_limit_out = str(data_out[count-1])
if lower_limit:
# get airspace lower limit
lower_limit_out = str(data_out[count-1])
if (freq) and (first_freq is False):
# get the first (and only the first) printed callsign
frequency = str(data_out[count-1])
first_freq = True
if arc: # what to do if an arc is found
# check to see if this a series, if so then increment the counter
if print_title == str(last_arc_title):
arc_counter += 0
else:
arc_counter == 0
# circle, clockwise or anti-clockwise arc?
circle = re.search(r"^[A|a]\sradius\,\scentred\sat", data_out[count-2])
circle2 = re.search(r"^[A|a]\scircle\,", data_out[count-2])
anti_clockwise = re.search("anti-clockwise", data_out[count-2])
clockwise = re.search(r"\sclockwise\s", data_out[count-2])
cacw = 0
if anti_clockwise:
cacw = 2
elif circle or circle2:
cacw = 3
elif clockwise:
cacw = 1
if cacw > 0:
radius = data_out[count-1]
# deal with radius in meters
if float(radius) > 100:
radius = float(radius) / 1000
if cacw < 3:
start_lon = re.search(r"([\d]{6,7})(E|W)", data_out[count-4])
start_lat = re.search(r"([\d]{6,7})(N|S)", data_out[count-6])
centre_lat = re.search(r"([\d]{6,7})(N|S)", data_out[count+4])
centre_lon = re.search(r"([\d]{6,7})(E|W)", data_out[count+6])
end_lat = re.search(r"([\d]{6,7})(N|S)", data_out[count+9])
end_lon = re.search(r"([\d]{6,7})(E|W)", data_out[count+11])
elif cacw == 3:
centre_lat = re.search(r"([\d]{6,7})(N|S)", data_out[count+4])
centre_lon = re.search(r"([\d]{6,7})(E|W)", data_out[count+6])
if (cacw < 3) and (centre_lat == None):
centre_lat = re.search(r"([\d]{6,7})(N|S)", data_out[count+2])
centre_lon = re.search(r"([\d]{6,7})(E|W)", data_out[count+4])
end_lat = re.search(r"([\d]{6,7})(N|S)", data_out[count+7])
end_lon = re.search(r"([\d]{6,7})(E|W)", data_out[count+9])
if (cacw < 3) and (end_lat == None):
end_lat = re.search(r"([\d]{6,7})(N|S)", data_out[count+11])
end_lon = re.search(r"([\d]{6,7})(E|W)", data_out[count+13])
# convert from dms to dd
if cacw == 3:
mid_dd = self.dms2dd(centre_lat.group(1), centre_lon.group(1), centre_lat.group(2), centre_lon.group(2))
start_dd = mid_dd
end_dd = mid_dd
else:
start_dd = self.dms2dd(start_lat.group(1), start_lon.group(1), start_lat.group(2), start_lon.group(2))
mid_dd = self.dms2dd(centre_lat.group(1), centre_lon.group(1), centre_lat.group(2), centre_lon.group(2))
end_dd = self.dms2dd(end_lat.group(1), end_lon.group(1), end_lat.group(2), end_lon.group(2))
if danger_area:
print(danger_title)
else:
print(print_title)
arc_out = self.generate_semicircle(float(mid_dd[0]), float(mid_dd[1]), float(start_dd[0]), float(start_dd[1]), float(end_dd[0]), float(end_dd[1]), cacw, float(radius))
for coord in arc_out:
space.append(coord)
# store the last arc title to compare against
last_arc_title = str(print_title)
if coords:
loop_coord = False
# get the coordinate
print_coord = re.findall(r"([\d]{6,7})(N|S|E|W)", str(data_out[count-1]))
if print_coord:
space.append(print_coord[0])
if (ad217 == 1) and (loop_coord):
# for looping through AD2.17 aerodrome
first_callsign = True
callsign_out = print_title
elif (ad217 == 2) and (loop_coord):
# for danger areas
first_callsign = True
callsign_out = danger_title
if (loop_coord) and (space != []) and (first_callsign):
def coord_to_table(last_df_in_title, callsign_out, frequency, output, upper_limit, lower_limit, airspace_class):
df_out = {
'name': last_df_in_title,
'callsign': callsign_out,
'frequency': str(frequency),
'boundary': str(output),
'upper_fl': str(upper_limit),
'lower_fl': str(lower_limit),
'class': str(airspace_class)
}
return df_out
output = self.getBoundary(space)
if airspace:
# for FIRs do this
if last_airspace.group(1) == "FIR":
df_fir_out = coord_to_table(last_df_in_title, callsign_out, frequency, output, upper_limit_out, lower_limit_out, airspace_class_out)
df_fir = df_fir.append(df_fir_out, ignore_index=True)
# for UIRs do this - same extent as FIR
#if last_airspace.group(1) == "UIR":
# df_uir_out = {'name': last_df_in_title,'callsign': callsign_out,'frequency': str(frequency), 'boundary': str(output), 'upper_fl': '000', 'lower_fl': '000'}
# df_uir = df_uir.append(df_uir_out, ignore_index=True)
# for CTAs do this
if last_airspace.group(1) == "CTA":
df_cta_out = coord_to_table(last_df_in_title, callsign_out, frequency, output, upper_limit_out, lower_limit_out, airspace_class_out)
df_cta = df_cta.append(df_cta_out, ignore_index=True)
if last_airspace.group(1) == "TMA":
df_tma_out = coord_to_table(last_df_in_title, callsign_out, frequency, output, upper_limit_out, lower_limit_out, airspace_class_out)
df_tma = df_tma.append(df_tma_out, ignore_index=True)
if last_airspace.group(1) == "CTR":
df_ctr_out = coord_to_table(last_df_in_title, callsign_out, frequency, output, upper_limit_out, lower_limit_out, airspace_class_out)
df_ctr = df_ctr.append(df_ctr_out, ignore_index=True)
if (last_airspace.group(1) == "ATZ") or (last_airspace.group(1) == "RMZ") or (last_airspace.group(1) == "CTR"):
df_atz_out = coord_to_table(last_df_in_title, callsign_out, frequency, output, upper_limit_out, lower_limit_out, airspace_class_out)
df_atz = df_atz.append(df_atz_out, ignore_index=True)
if danger:
df_danger_out = coord_to_table(last_df_in_title, False, False, output, upper_limit_out, lower_limit_out, False)
df_danger = df_danger.append(df_danger_out, ignore_index=True)
space = []
loop_coord = True
first_callsign = False
first_freq = False
if airspace:
if danger:
last_df_in_title = danger_title
else:
last_df_in_title = print_title
last_airspace = airspace
row += 1
count += 1
df_uir = df_fir
if (ad217 == 1):
return [df_fir, df_uir, df_cta, df_tma, df_ctr, df_atz_out, df_danger]
else:
return [df_fir, df_uir, df_cta, df_tma, df_ctr, df_atz, df_danger]
def parse_ad01_data(self):
"""Parse the data from AD-0.1"""
print("Parsing "+ self.country +"-AD-0.1 data to obtain ICAO designators...")
# create the table
dfColumns = ['icao_designator','verified','location','elevation','name','magnetic_variation']
df = pd.DataFrame(columns=dfColumns)
# scrape the data
getAerodromeList = self.get_table_soup(self.country + "-AD-0.1-en-GB.html")
# process the data
listAerodromeList = getAerodromeList.find_all("h3")
barLength = len(listAerodromeList)
with alive_bar(barLength) as bar: # Define the progress bar
for row in listAerodromeList:
# search for aerodrome icao designator and name
getAerodrome = re.search(rf"({self.country}[A-Z]{{2}})(\n[\s\S]{{7}}\n[\s\S]{{8}})([A-Z]{{4}}.*)(\n[\s\S]{{6}}<\/a>)", str(row))
if getAerodrome:
# Place each aerodrome into the DB
dfOut = {'icao_designator': str(getAerodrome[1]),'verified': 0,'location': 0,'elevation': 0,'name': str(getAerodrome[3]),'magnetic_variation': 0}
df = df.append(dfOut, ignore_index=True)
bar()
return df
def parse_ad02_data(self, dfAd01):
"""Parse the data from AD-2.x"""
print("Parsing "+ self.country +"-AD-2.x data to obtain aerodrome data...")
df_columns_rwy = ['icao_designator','runway','location','elevation','bearing','length']
df_rwy = pd.DataFrame(columns=df_columns_rwy)
df_columns_srv = ['icao_designator','callsign_type','frequency']
df_srv = pd.DataFrame(columns=df_columns_srv)
# Select all aerodromes in the database
barLength = len(dfAd01.index)
with alive_bar(barLength) as bar: # Define the progress bar
for index, row in dfAd01.iterrows():
aeroIcao = row['icao_designator']
# Select all runways in this aerodrome
getRunways = self.get_table_soup(self.country + "-AD-2."+ aeroIcao +"-en-GB.html")
if getRunways !=404:
print(" Parsing AD-2 data for " + aeroIcao)
aerodromeAd0202 = getRunways.find(id=aeroIcao + "-AD-2.2")
aerodromeAd0212 = getRunways.find(id=aeroIcao + "-AD-2.12")
aerodromeAd0218 = getRunways.find(id=aeroIcao + "-AD-2.18")
# Find current magnetic variation for this aerodrome
aerodromeMagVar = self.search("([\d]{1}\.[\d]{2}).([W|E]{1})", "TAD_HP;VAL_MAG_VAR", str(aerodromeAd0202))
pM = self.plusMinus(aerodromeMagVar[0][1])
floatMagVar = pM + aerodromeMagVar[0][0]
# Find lat/lon/elev for aerodrome
aerodromeLat = re.search(r'(Lat: )(<span class="SD" id="ID_[\d]{7}">)([\d]{6})([N|S]{1})', str(aerodromeAd0202))
aerodromeLon = re.search(r"(Long: )(<span class=\"SD\" id=\"ID_[\d]{7}\">)([\d]{7})([E|W]{1})", str(aerodromeAd0202))
aerodromeElev = re.search(r"(VAL_ELEV\;)([\d]{1,4})", str(aerodromeAd0202))
full_location = self.sct_location_builder(
aerodromeLat.group(3),
aerodromeLon.group(3),
aerodromeLat.group(4),
aerodromeLon.group(4)
)
dfAd01.at[index, 'verified'] = 1
dfAd01.at[index, 'magnetic_variation'] = str(floatMagVar)
dfAd01.at[index, 'location'] = str(full_location)
dfAd01.at[index, 'elevation'] = str(aerodromeElev[2])
# Find runway locations
aerodromeRunways = self.search("([\d]{2}[L|C|R]?)", "TRWY_DIRECTION;TXT_DESIG", str(aerodromeAd0212))
#print(aerodromeRunways)
aerodromeRunwaysLat = self.search("([\d]{6}[\.]?[\d]{0,2}[N|S]{1})", "TRWY_CLINE_POINT;GEO_LAT", str(aerodromeAd0212))
#print(aerodromeRunwaysLat)
aerodromeRunwaysLong = self.search("([\d]{7}[\.]?[\d]{0,2}[E|W]{1})", "TRWY_CLINE_POINT;GEO_LONG", str(aerodromeAd0212))
#print(aerodromeRunwaysLong)
aerodromeRunwaysElev = self.search("([\d]{1,3}\.[\d]{1})", "TRWY_CLINE_POINT;VAL_ELEV", str(aerodromeAd0212))
#print(aerodromeRunwaysElev)
aerodromeRunwaysBearing = self.search("([\d]{3}\.[\d]{2}.)", "TRWY_DIRECTION;VAL_TRUE_BRG", str(aerodromeAd0212))
#print(aerodromeRunwaysBearing)
aerodromeRunwaysLen = self.search("([\d]{3,4})", "TRWY;VAL_LEN", str(aerodromeAd0212))
#print(aerodromeRunwaysLen)
for rwy, lat, lon, elev, brg, rwyLen in zip(aerodromeRunways, aerodromeRunwaysLat, aerodromeRunwaysLong, aerodromeRunwaysElev, aerodromeRunwaysBearing, aerodromeRunwaysLen):
# Add runway to the aerodromeDB
latSplit = re.search(r"([\d]{6})(\.[\d]{2})?([N|S]{1})", str(lat))
lonSplit = re.search(r"([\d]{7})(\.[\d]{2})?([E|W]{1})", str(lon))
if latSplit.group(2) is None:
printer_la = latSplit.group(1)
else:
printer_la = latSplit.group(1) + latSplit.group(2)
if lonSplit.group(2) is None:
printer_lo = lonSplit.group(1)
else:
printer_lo = lonSplit.group(1) + lonSplit.group(2)
loc = self.sct_location_builder(
printer_la,
printer_lo,
latSplit.group(3),
lonSplit.group(3)
)
df_rwy_out = {'icao_designator': str(aeroIcao),'runway': str(rwy),'location': str(loc),'elevation': str(elev),'bearing': str(brg.rstrip('°')),'length': str(rwyLen)}
#print(df_rwy_out)
df_rwy = df_rwy.append(df_rwy_out, ignore_index=True)
# Find air traffic services
aerodromeServices = self.search("(APPROACH|GROUND|DELIVERY|TOWER|DIRECTOR|INFORMATION|RADAR|RADIO|FIRE|EMERGENCY)", "TCALLSIGN_DETAIL", str(aerodromeAd0218))
serviceFrequency = self.search("([\d]{3}\.[\d]{3})", "TFREQUENCY", str(aerodromeAd0218))
last_srv = ''
if len(aerodromeServices) == len(serviceFrequency):
# Simple aerodrome setups with 1 job, 1 frequency
for srv, frq in zip(aerodromeServices, serviceFrequency):
if str(srv) is None:
s_type = last_srv
else:
s_type = str(srv)
last_srv = s_type
df_srv_out = {'icao_designator': str(aeroIcao),'callsign_type': s_type,'frequency': str(frq)}
df_srv = df_srv.append(df_srv_out, ignore_index=True)
else:
# Complex aerodrome setups with multiple frequencies for the same job
print(Fore.BLUE + " Aerodrome " + aeroIcao + " has a complex comms structure" + Style.RESET_ALL)
for row in aerodromeAd0218.find_all("span"):
# get the full row and search between two "TCALLSIGN_DETAIL" objects
table_row = re.search(r"(APPROACH|GROUND|DELIVERY|TOWER|DIRECTOR|INFORMATION|RADAR|RADIO|FIRE|EMERGENCY)", str(row))
if table_row is not None:
callsign_type = table_row.group(1)
freq_row = re.search(r"([\d]{3}\.[\d]{3})", str(row))
if freq_row is not None:
frequency = str(freq_row.group(1))
if frequency != "121.500": # filter out guard frequencies
df_srv_out = {'icao_designator': str(aeroIcao),'callsign_type': callsign_type,'frequency': frequency}
df_srv = df_srv.append(df_srv_out, ignore_index=True)
else:
print(Fore.RED + "Aerodrome " + aeroIcao + " does not exist" + Style.RESET_ALL)
bar()
return [dfAd01, df_rwy, df_srv]
def parse_ad0217_data(self, dfAd01): # re-write of this section has been completed
"""This will parse airspace data from AD 2.17 for each aerodrome"""
print("Parsing "+ self.country +"-AD-2.17 Data (AIR TRAFFIC SERVICES AIRSPACE)...")
dfColumns = ['name', 'callsign', 'frequency', 'boundary', 'upper_fl', 'lower_fl', 'class']
df_atz = pd.DataFrame(columns=dfColumns)
# Select all aerodromes in the database
for index, rowu in dfAd01.iterrows():
aeroIcao = rowu['icao_designator']
# Select all runways in this aerodrome
getAerodromes = self.get_table_soup(self.country + "-AD-2."+ aeroIcao +"-en-GB.html")
if getAerodromes !=404:
print(" Parsing AD-2.17 data for " + aeroIcao)
#getData = self.get_table_soup(self.country + "-ENR-2.1-en-GB.html")
getData = getAerodromes.find(id=aeroIcao + "-AD-2.17")
output = self.airspace_parser(getData, 1)
if output[5] != False:
df_atz = df_atz.append(output[5], ignore_index=True)
return df_atz
def parse_enr016_data(self, dfAd01):
"""Parse the data from ENR-1.6"""
print("Parsing "+ self.country + "-ENR-1.6 data to obtan SSR code allocation plan")
dfColumns = ['start','end','depart','arrive', 'string']
df = pd.DataFrame(columns=dfColumns)
webpage = self.get_table_soup(self.country + "-ENR-1.6-en-GB.html")
getDiv = webpage.find("div", id = "ENR-1.6.2.6")
getTr = getDiv.find_all('tr')
barLength = len(getTr)
with alive_bar(barLength) as bar: # Define the progress bar
for row in getTr:
getP = row.find_all('p')
if len(getP) > 1:
text = re.search(r"([\d]{4})...([\d]{4})", getP[0].text) # this will just return ranges and ignore all discreet codes in the table
if text:
start = text.group(1)
end = text.group(2)
# create an array of words to search through to try and match code range to destination airport
locArray = getP[1].text.split()
for loc in locArray:
strip = re.search(r"([A-Za-z]{3,10})", loc)
if strip:
dep = "EG\w{2}"
# search the dataframe containing icao_codes
name = dfAd01[dfAd01['name'].str.contains(strip.group(1), case=False, na=False)]
if len(name.index) == 1:
dfOut = {'start': start,'end': end,'depart': dep,'arrive': name.iloc[0]['icao_designator'],'string': strip.group(1)}
df = df.append(dfOut, ignore_index=True)
elif strip.group(1) == "RAF" or strip.group(1) == "Military" or strip.group(1) == "RNAS" or strip.group(1) == "NATO":
dfOut = {'start': start,'end': end,'depart': dep,'arrive': 'Military','string': strip.group(1)}
df = df.append(dfOut, ignore_index=True)
elif strip.group(1) == "Transit":
dfOut = {'start': start,'end': end,'depart': dep,'arrive': locArray[2],'string': strip.group(1)}
df = df.append(dfOut, ignore_index=True)
bar()
return(df)
def parse_enr021_data(self): # re-write of this section has been completed
"""This will parse ENR 2 data from the given AIP"""
print("Parsing "+ self.country +"-ENR-2.1 Data (FIR, UIR, TMA AND CTA)...")
getData = self.get_table_soup(self.country + "-ENR-2.1-en-GB.html")
output = self.airspace_parser(getData)
return [output[0], output[1], output[2], output[3]]
def parse_enr022_data(self): # re-write of this section has been completed
"""This will parse ENR 2.2 data from the given AIP"""
print("Parsing "+ self.country +"-ENR-2.2 Data (OTHER REGULATED AIRSPACE)...")
getData = self.get_table_soup(self.country + "-ENR-2.2-en-GB.html")
output = self.airspace_parser(getData)
return output[5]
def acc_uac_control_sectors(self):
xlsx_file = f"{work_dir}\\ACC_UAC_Sectors.csv"
part_number = False
dfColumns = ['name', 'boundary']
df = pd.DataFrame(columns=dfColumns)
def wrangler(data, sector, df):
"""sorts out all of the random coords into something useful"""
latlon = ""
if data != "":
pair = re.findall(r"([0-9]{6})([N|S])[\,\s\.\n]?([0-9]{7})([E|W])", data)
for p in pair:
lat = p[0]
lon = p[2]
ns = p[1]
ew = p[3]
q = self.sct_location_builder(lat, lon, ns, ew)
latlon = f"{latlon}/{q}"
latlon = latlon.lstrip("/")
dfOut = {'name': str(sector), 'boundary': str(latlon)}
print(dfOut)
return dfOut
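# wrangler() collapses the coordinate string accumulated for one sector into
# a single "/"-separated boundary string via sct_location_builder before it
# is appended to the output DataFrame.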
coord_full = ""
area = False
sector = False
with open(xlsx_file) as read_file:
for line in read_file:
split_line = line.split(',')
area_name = re.match(r"[A-Z]{3,}\sAC", line) # area name
sector_name = re.match(r"[A-Z]{3,}", split_line[0]) # sector name
sector_number = re.match(r"[0-9]{1,2}\,\,", line) # sector name
if len(split_line) > 3:
part_number = re.match(r"[0-9]{1}", split_line[2]) # get sector part number
upper_fl = re.match(r"[FL\s]{2,4}[\d\s]{1,5}", split_line[3]) # get upper flight level
if not upper_fl:
upper_fl = re.match(r"[FL\s]{2,4}[\d\s]{2,5}", split_line[0]) # get upper flight level
# find area name
if area_name:
dfOut = wrangler(coord_full, area, df)
df = df.append(dfOut, ignore_index=True)
coord_full = ""
area = split_line[0]
elif sector_name or sector_number:
# sector name or number
if part_number:
dfOut = wrangler(coord_full, sector, df)
df = df.append(dfOut, ignore_index=True)
coord_full = ""
sector = f"{area} {split_line[0]} {part_number[0]}"
else:
sector = f"{area} {split_line[0]}"
if upper_fl:
print_txt = split_line[3].replace(" ", "")
#print(f"\t\t{print_txt}")
elif upper_fl:
print_txt = upper_fl[0].replace(" ", "")
#print(f"\t\t{print_txt}")
for splt in split_line:
clear_spaces = splt.replace(" ", "").replace("O", "0").replace("l", "1").replace("I", "1")
with open("out.txt", "a") as file:
file.write(clear_spaces)
nav = re.findall(r"[0-9OlIi]{5,8}[\-]?[N|S|E|W]", clear_spaces)
for coord in nav:
ns_check = re.match(r"[0-9]{6}[N|S]", coord)
we_check = re.match(r"[0-9]{7}[W|E]", coord)
if (not ns_check) and (not we_check):
ns_check_s2 = re.search(r"([0-9]{5})([N|S])", coord)
ew_check_s2 = re.search(r"([0-9]{6})([W|E])", coord)
ew_check_s3 = re.search(r"([0-9]{5})([W|E])", coord)
if ns_check_s2:
coord = f"{ns_check_s2[0]}0{ns_check_s2[1]}"
elif ew_check_s2:
coord = f"{ew_check_s2[0]}0{ew_check_s2[1]}"
elif ew_check_s3:
coord = f"{ew_check_s3[0]}00{ew_check_s3[1]}"
else:
coord = input(f"Error spotted with {coord}, please enter correct value: ")
coord_full = f"{coord_full} {coord}"
return df
def parse_enr03_data(self, section):
dfColumns = ['name', 'route']
dfEnr03 = pd.DataFrame(columns=dfColumns)
print("Parsing "+ self.country +"-ENR-3."+ section +" data to obtain ATS routes...")
getENR3 = self.get_table_soup(self.country + "-ENR-3."+ section +"-en-GB.html")
listTables = getENR3.find_all("tbody")
barLength = len(listTables)
with alive_bar(barLength) as bar: # Define the progress bar
for row in listTables:
getAirwayName = self.search("([A-Z]{1,2}[\d]{1,4})", "TEN_ROUTE_RTE;TXT_DESIG", str(row))
getAirwayRoute = self.search("([A-Z]{3,5})", "T(DESIGNATED_POINT|DME|VOR|NDB);CODE_ID", str(row))
printRoute = ''
if getAirwayName:
for point in getAirwayRoute:
printRoute += str(point[0]) + "/"
dfOut = {'name': str(getAirwayName[0]), 'route': str(printRoute).rstrip('/')}
dfEnr03 = dfEnr03.append(dfOut, ignore_index=True)
bar()
return dfEnr03
def parse_enr04_data(self, sub):
dfColumns = ['name', 'type', 'coords', 'freq']
df = pd.DataFrame(columns=dfColumns)
print("Parsing "+ self.country +"-ENR-4."+ sub +" Data (RADIO NAVIGATION AIDS - EN-ROUTE)...")
getData = self.get_table_soup(self.country + "-ENR-4."+ sub +"-en-GB.html")
listData = getData.find_all("tr", class_ = "Table-row-type-3")
barLength = len(listData)
with alive_bar(barLength) as bar: # Define the progress bar
for row in listData:
# Split out the point name
id = row['id']
name = id.split('-')
# Find the point location
lat = self.search("([\d]{6}[\.]{0,1}[\d]{0,2}[N|S]{1})", "T", str(row))
lon = self.search("([\d]{7}[\.]{0,1}[\d]{0,2}[E|W]{1})", "T", str(row))
pointLat = re.search(r"([\d]{6}(\.[\d]{2}|))([N|S]{1})", str(lat))
pointLon = re.search(r"([\d]{7}(\.[\d]{2}|))([W|E]{1})", str(lon))
if pointLat:
fullLocation = self.sct_location_builder(
pointLat.group(1),
pointLon.group(1),
pointLat.group(3),
pointLon.group(3)
)
if sub == "1":
# Do this for ENR-4.1
# Set the navaid type correctly
if name[1] == "VORDME":
name[1] = "VOR"
#elif name[1] == "DME": # prob don't need to add all the DME points in this area
# name[1] = "VOR"
# find the frequency
freq_search = self.search("([\d]{3}\.[\d]{3})", "T", str(row))
                        freq = re.search(r"([\d]{3}\.[\d]{3})", str(freq_search))
# Add navaid to the aerodromeDB
dfOut = {'name': str(name[2]), 'type': str(name[1]), 'coords': str(fullLocation), 'freq': freq.group(1)}
elif sub == "4":
# Add fix to the aerodromeDB
dfOut = {'name': str(name[1]), 'type': 'FIX', 'coords': str(fullLocation), 'freq': '000.000'}
df = df.append(dfOut, ignore_index=True)
bar()
return df
def parse_enr051_data(self):
"""This will parse ENR 5.1 data from the given AIP"""
print("Parsing "+ self.country +"-ENR-5.1 data for PROHIBITED, RESTRICTED AND DANGER AREAS...")
get_data = self.get_table_soup(self.country + "-ENR-5.1-en-GB.html")
output = self.airspace_parser(get_data, 2)
return output[6]
def test(self): # testing code - remove for live
test = self.parse_enr051_data()
test.to_csv('Dataframes/Enr051.csv')
@staticmethod
def plusMinus(arg):
"""Turns a compass point into the correct + or - for lat and long"""
if arg in ('N','E'):
return "+"
return "-"
def run(self):
full_dir = f"{work_dir}\\DataFrames\\"
Ad01 = self.parse_ad01_data() # returns single dataframe
Ad02 = self.parse_ad02_data(Ad01) # returns dfAd01, df_rwy, df_srv
Ad0217 = self.parse_ad0217_data(Ad01) # returns single dataframe
Enr016 = self.parse_enr016_data(Ad01) # returns single dataframe
Enr021 = self.parse_enr021_data() # returns dfFir, dfUir, dfCta, dfTma
Enr022 = self.parse_enr022_data() # returns single dataframe
Enr031 = self.parse_enr03_data('1') # returns single dataframe
Enr033 = self.parse_enr03_data('3') # returns single dataframe
Enr035 = self.parse_enr03_data('5') # returns single dataframe
Enr041 = self.parse_enr04_data('1') # returns single dataframe
Enr044 = self.parse_enr04_data('4') # returns single dataframe
Enr051 = self.parse_enr051_data() # returns single dataframe
AccUac = self.acc_uac_control_sectors() # returns single dataframe
Ad01.to_csv(f'{full_dir}Ad01.csv')
Ad02[1].to_csv(f'{full_dir}Ad02-Runways.csv')
Ad02[2].to_csv(f'{full_dir}Ad02-Services.csv')
Ad0217.to_csv(f'{full_dir}Ad0217-ATS.csv')
Enr016.to_csv(f'{full_dir}Enr016.csv')
Enr021[0].to_csv(f'{full_dir}Enr021-FIR.csv')
Enr021[1].to_csv(f'{full_dir}Enr021-UIR.csv')
Enr021[2].to_csv(f'{full_dir}Enr021-CTA.csv')
Enr021[3].to_csv(f'{full_dir}Enr021-TMA.csv')
Enr022.to_csv(f'{full_dir}Enr022-ATZ.csv')
Enr031.to_csv(f'{full_dir}Enr031.csv')
Enr033.to_csv(f'{full_dir}Enr033.csv')
Enr035.to_csv(f'{full_dir}Enr035.csv')
Enr041.to_csv(f'{full_dir}Enr041.csv')
Enr044.to_csv(f'{full_dir}Enr044.csv')
Enr051.to_csv(f'{full_dir}Enr051.csv')
AccUac.to_csv(f'{full_dir}AccUac.csv')
return [Ad01, Ad02, Enr016, Enr021, Enr022, Enr031, Enr033, Enr035, Enr041, Enr044, Enr051]
@staticmethod
def search(find, name, string):
searchString = find + "(?=<\/span>.*>" + name + ")"
result = re.findall(f"{str(searchString)}", str(string))
return result
@staticmethod
def split(word):
return [char for char in word]
def sct_location_builder(self, lat, lon, lat_ns, lon_ew):
"""Returns an SCT file compliant location"""
lat_split = self.split(lat) # split the lat into individual digits
if len(lat_split) > 6:
lat_print = f"{lat_ns}{lat_split[0]}{lat_split[1]}.{lat_split[2]}{lat_split[3]}.{lat_split[4]}{lat_split[5]}.{lat_split[7]}{lat_split[8]}"
else:
lat_print = f"{lat_ns}{lat_split[0]}{lat_split[1]}.{lat_split[2]}{lat_split[3]}.{lat_split[4]}{lat_split[5]}.00"
lon_split = self.split(lon)
if len(lon_split) > 7:
lon_print = f"{lon_ew}{lon_split[0]}{lon_split[1]}{lon_split[2]}.{lon_split[3]}{lon_split[4]}.{lon_split[5]}{lon_split[6]}.{lon_split[8]}{lon_split[9]}"
else:
lon_print = f"{lon_ew}{lon_split[0]}{lon_split[1]}{lon_split[2]}.{lon_split[3]}{lon_split[4]}.{lon_split[5]}{lon_split[6]}.00"
fullLocation = f"{lat_print} {lon_print}" # AD-2.2 gives aerodrome location as DDMMSS / DDDMMSS
return fullLocation
def getBoundary(self, space, name=0):
"""creates a boundary useable in vatSys from AIRAC data"""
lat = True
lat_lon_obj = []
draw_line = []
fullBoundary = ''
for coord in space:
coord_format = re.search(r"[N|S][\d]{2,3}\.[\d]{1,2}\.[\d]{1,2}\.[\d]{1,2}\s[E|W][\d]{2,3}\.[\d]{1,2}\.[\d]{1,2}\.[\d]{1,2}", str(coord))
if coord_format != None:
fullBoundary += f"{coord}/"
else:
if lat:
lat_lon_obj.append(coord[0])
lat_lon_obj.append(coord[1])
lat = False
else:
lat_lon_obj.append(coord[0])
lat_lon_obj.append(coord[1])
lat = True
# if lat_lon_obj has 4 items
if len(lat_lon_obj) == 4:
lat_lon = self.sct_location_builder(lat_lon_obj[0], lat_lon_obj[2], lat_lon_obj[1], lat_lon_obj[3])
fullBoundary += f"{lat_lon}/"
draw_line.append(lat_lon)
lat_lon_obj = []
return fullBoundary.rstrip('/')
@staticmethod
def split_single(word):
return [char for char in str(word)]
def dms2dd(self, lat, lon, ns, ew):
lat_split = self.split_single(lat)
lon_split = self.split_single(lon)
lat_dd = lat_split[0] + lat_split[1]
lat_mm = lat_split[2] + lat_split[3]
lat_ss = lat_split[4] + lat_split[5]
# lat N or S (+/-) lon E or W (+/-)
lat_out = int(lat_dd) + int(lat_mm) / 60 + int(lat_ss) / 3600
lon_dd = lon_split[0] + lon_split[1] + lon_split[2]
lon_mm = lon_split[3] + lon_split[4]
lon_ss = lon_split[5] + lon_split[6]
lon_out = int(lon_dd) + int(lon_mm) / 60 + int(lon_ss) / 3600
if ns == "S":
lat_out = lat_out - (lat_out * 2)
if ew == "W":
lon_out = lon_out - (lon_out * 2)
return [lat_out, lon_out]
    def generate_semicircle(self, center_x, center_y, start_x, start_y, end_x, end_y, direction, dst=2.5):
        """Create a semicircle. Direction is 1 for clockwise and 2 for anti-clockwise"""
if (direction == 1) or (direction == 2):
# centre point to start
geolib_start = Geodesic.WGS84.Inverse(center_x, center_y, start_x, start_y)
start_brg = geolib_start['azi1']
start_dst = geolib_start['s12']
start_brg_compass = ((360 + start_brg) % 360)
# centre point to end
geolib_end = Geodesic.WGS84.Inverse(center_x, center_y, end_x, end_y)
end_brg = geolib_end['azi1']
end_brg_compass = ((360 + end_brg) % 360)
elif direction == 3: # if direction set to 3, draw a circle
start_brg = 0
start_dst = dst * 1852 # convert nautical miles to meters
end_brg_compass = 359
direction = 1 # we can set the direction to 1 as the bit of code below can still be used
arc_out = []
if direction == 1: # if cw
while round(start_brg) != round(end_brg_compass):
arc_coords = Geodesic.WGS84.Direct(center_x, center_y, start_brg, start_dst)
arc_out.append(self.dd2dms(arc_coords['lat2'], arc_coords['lon2'], "1"))
start_brg = ((start_brg + 1) % 360)
elif direction == 2: # if acw
while round(start_brg) != round(end_brg_compass):
arc_coords = Geodesic.WGS84.Direct(center_x, center_y, start_brg, start_dst)
arc_out.append(self.dd2dms(arc_coords['lat2'], arc_coords['lon2'], "1"))
start_brg = ((start_brg - 1) % 360)
return arc_out
@staticmethod
def dd2dms(latitude, longitude, dd_type=0):
# math.modf() splits whole number and decimal into tuple
# eg 53.3478 becomes (0.3478, 53)
split_degx = math.modf(longitude)
# the whole number [index 1] is the degrees
degrees_x = int(split_degx[1])
# multiply the decimal part by 60: 0.3478 * 60 = 20.868
# split the whole number part of the total as the minutes: 20
        # abs() absolute value - no negative
minutes_x = abs(int(math.modf(split_degx[0] * 60)[1]))
# multiply the decimal part of the split above by 60 to get the seconds
# 0.868 x 60 = 52.08, round excess decimal places to 2 places
        # abs() absolute value - no negative
seconds_x = abs(round(math.modf(split_degx[0] * 60)[0] * 60,2))
# repeat for latitude
split_degy = math.modf(latitude)
degrees_y = int(split_degy[1])
minutes_y = abs(int(math.modf(split_degy[0] * 60)[1]))
seconds_y = abs(round(math.modf(split_degy[0] * 60)[0] * 60,2))
# account for E/W & N/S
if longitude < 0:
EorW = "W"
else:
EorW = "E"
if latitude < 0:
NorS = "S"
else:
NorS = "N"
# abs() remove negative from degrees, was only needed for if-else above
output = (NorS + str(abs(round(degrees_y))).zfill(3) + "." + str(round(minutes_y)).zfill(2) + "." + str(seconds_y).zfill(3) + " " + EorW + str(abs(round(degrees_x))).zfill(3) + "." + str(round(minutes_x)).zfill(2) + "." + str(seconds_x).zfill(3))
return output
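# Illustrative, self-contained sketch of the DMS -> decimal-degree arithmetic used by
# dms2dd() and dd2dms() above. The sample coordinate is made up for demonstration and
# assumes the same DDMMSS / DDDMMSS string layout found in the AIP tables.
def _demo_dms_to_dd():
    lat, ns = "532052", "N"    # 53 deg 20 min 52 sec North
    lon, ew = "0013000", "W"   # 001 deg 30 min 00 sec West
    lat_dd = int(lat[0:2]) + int(lat[2:4]) / 60 + int(lat[4:6]) / 3600
    lon_dd = int(lon[0:3]) + int(lon[3:5]) / 60 + int(lon[5:7]) / 3600
    if ns == "S":
        lat_dd = -lat_dd
    if ew == "W":
        lon_dd = -lon_dd
    return round(lat_dd, 4), round(lon_dd, 4)  # approx (53.3478, -1.5)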
class Builder:
'''Class to build sct files from the dataframes for POSCON'''
def __init__(self, fileImport=0):
self.mapCentre = "+53.7-1.5"
# if there are dataframe files present then use those, else run the webscraper
if fileImport == 1:
scrape = []
scrape.append(pd.read_csv('Dataframes/Ad01.csv', index_col=0)) #0
scrape.append(pd.read_csv('Dataframes/Ad02-Runways.csv', index_col=0)) #1
scrape.append(pd.read_csv('Dataframes/Ad02-Services.csv', index_col=0)) #2
scrape.append(pd.read_csv('Dataframes/Enr016.csv', index_col=0)) #3
scrape.append(pd.read_csv('DataFrames/Enr021-FIR.csv', index_col=0)) #4
scrape.append(pd.read_csv('DataFrames/Enr021-UIR.csv', index_col=0)) #5
scrape.append(pd.read_csv('DataFrames/Enr021-CTA.csv', index_col=0)) #6
scrape.append(pd.read_csv('DataFrames/Enr021-TMA.csv', index_col=0)) #7
            scrape.append(pd.read_csv('DataFrames/Enr022-ATZ.csv', index_col=0)) #8
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert ~df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
        how="inner",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
### test complement with no view_df and a negative interval
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
    df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
import requests
import pandas as pd
import util_functions as uf
import geopandas as gpd
from shapely.geometry import Point, Polygon
def extract_json(json_id):
# Loop through each feature in GeoJson and pull our metadata and polygon
url = "https://opendata.arcgis.com/datasets/{}.geojson".format(json_id)
resp = requests.get(url).json()
# Define empty list for concat
feature_df_list = []
for enum, feature in enumerate(resp['features']):
# Pull out metadata
feature_df = pd.DataFrame(feature['properties'], index=[enum])
# Convert Polygon geometry to geodataframe
geometry_df = gpd.GeoDataFrame(feature['geometry'])
# Convert geometry to polygon and add back to metadata dataframe
feature_df['polygon'] = Polygon(geometry_df['coordinates'].iloc[0])
feature_df_list.append(feature_df)
# Combine each Cluster into master dataframe
    combined_df = pd.concat(feature_df_list, axis=0)
    return combined_df
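# Illustrative usage sketch only: the json_id below is a placeholder rather than a real
# ArcGIS Open Data identifier, and calling extract_json requires network access.
def _demo_extract_json(json_id="1234567890abcdef1234567890abcdef_0"):
    clusters = extract_json(json_id)
    print(clusters['polygon'].head())
    return clusters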
from time import time
import os
import sys
import shutil
import click
from pathlib import Path
import config
from openslide import OpenSlide
import pandas as pd
from tqdm import tqdm
from skimage.morphology import remove_small_objects
from skimage.io import imread
from skimage.transform import rescale
import random
import numpy as np
import keras
from weighted_loss_unet import *
from predict import (
predict_image,
make_pred_dataframe,
make_patient_dataframe,
post_processing,
predict_path
)
c = config.Config()
def safe_save(df: pd.DataFrame, location: Path):
tmp = Path(__file__).parent / ".tmp.pickle"
df.reset_index(drop=True).to_feather(tmp)
shutil.copy(tmp, location)
def mask(img):
img_mean = np.mean(img, axis=-1) / 255
mask = np.logical_and(0.1 < img_mean, img_mean < 0.9)
mask = remove_small_objects(mask, np.sum(mask) * 0.1)
return mask
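# Minimal illustrative check of mask(): on a synthetic image that is half black background
# and half mid-gray "tissue", only the mid-gray half should pass the 0.1 < mean < 0.9
# threshold. The values are arbitrary demo data, not real slide content.
def _demo_mask():
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    img[:, 32:] = 128  # mid-gray block stands in for tissue
    m = mask(img)
    return m[:, :32].any(), m[:, 32:].all()  # expected: (False, True)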
def tissue_positions(slide: OpenSlide):
thumbnail = slide.get_thumbnail((1000, 1000))
tissue = mask(thumbnail)
scale_factor = max(slide.dimensions) / max(thumbnail.size)
coords = np.where(tissue)
coords = [(c * scale_factor).astype(np.int) for c in coords]
coords = list(zip(*coords))
return coords
def slide_patches(slide: OpenSlide, n=10, width=1024):
coords = tissue_positions(slide)
for y, x in coords[:: int(len(coords) / n)]:
y, x = y - int(width / 2), x - int(width / 2)
yield np.array(slide.read_region((x, y), 0, (width, width)))[..., :3]
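# Illustrative sketch of how the helpers above compose: open a whole-slide image and
# iterate a handful of tissue-centred patches. "example.svs" is a placeholder path and
# must point to an OpenSlide-readable file for this to run.
def _demo_slide_patches(slide_path="example.svs", n=4, width=1024):
    slide = OpenSlide(slide_path)
    for i, patch in enumerate(slide_patches(slide, n=n, width=width)):
        print(i, patch.shape)  # each patch is (width, width, 3)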
@click.command()
@click.option(
"--destination",
"-d",
type=click.Path(file_okay=False, dir_okay=True),
default=Path(__file__).parent,
help="Destination folder.",
)
@click.option(
"--model_name", "-m", default="unet_quip_10000", help="Name of segmentation model."
)
@click.option("--cutoff", "-c", default=0.05, help="Cutoff for the segmentation model.")
@click.option("--min_size", "-s", default=5, help="Smallest size in pixels for a cell.")
@click.option(
"--scale",
"-r",
default=1.0,
    help="Target scale when rescaling image during prediction of segmentation map. Might improve result if the images are captured at a different magnification than x40.",
)
@click.option(
"--n_samples",
"-n",
default=200,
help="The number samples segmended from the whole slide image.",
)
@click.option(
"--size",
default=1024,
help="Size of each sample",
)
@click.option(
"--stride",
default= 256,
help="Stride in pixels for segmentation prediction.",
)
@click.argument("source", type=click.Path())
@click.argument("image_type", type=click.Choice(["WSI", "TMA"]))
def main(**kwargs):
image_type = kwargs["image_type"]
destination_file = Path(kwargs["destination"]) / _file_name(**kwargs)
started = os.path.exists(destination_file)
if started:
print("Resuming..")
        df_pat = pd.read_feather(destination_file)
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
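# Illustrative call sketch: "float_frame" is assumed to be a float-only DataFrame fixture
# as used elsewhere in this module; it shows how the helper above checks a reduction op
# against its NumPy reference.
def _example_stat_op_checks(float_frame):
    assert_stat_op_calc('sum', np.sum, float_frame)
    assert_stat_op_calc('mean', np.mean, float_frame, check_dates=True)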
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns containing NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
        # so it needs to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
        tm.assert_frame_equal(expected, result)
import nibabel as nib
import os
import sys
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from scipy.io import loadmat
# threshold of surface coverage 40 %
percentage = 40
# define species-specific settings
tracts_df = pd.DataFrame(columns=('species', 'hemi', 'tract', 'tract_name'))
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/01_data_provider.ipynb (unless otherwise specified).
__all__ = ['DataProvider', 'get_efficiently']
# Cell
from bs4 import BeautifulSoup as bs
import numpy as np
import os
import pandas as pd
from fastcore.foundation import patch
# Cell
class DataProvider():
def __init__(self, data_folder_path):
self.data_folder_path = data_folder_path
self.raw = os.path.join(data_folder_path, 'raw')
self.external = os.path.join(data_folder_path, 'external')
self.interim = os.path.join(data_folder_path, 'interim')
self.processed = os.path.join(data_folder_path, 'processed')
# Checking if folder paths exist
assert os.path.isdir(self.external), "External data folder not found."
assert os.path.isdir(self.raw), "Raw data folder not found."
assert os.path.isdir(self.interim), "Interim data folder not found."
assert os.path.isdir(self.processed), "Processed data folder not found."
# Phone screening files
self.phonescreening_data_path = os.path.join(self.raw, "phonescreening.csv")
self.phone_codebook_path = os.path.join(self.external, "phone_codebook.html")
# Basic assessment files
self.ba_codebook_path = os.path.join(self.external, "ba_codebook.html")
self.ba_data_path = os.path.join(self.raw, "ba.csv")
self.b07_participants_path = os.path.join(self.external, "b7_participants.xlsx")
# Movisense data
self.mov_berlin_path = os.path.join(self.raw, "mov_data_b.csv")
self.mov_dresden_path = os.path.join(self.raw, "mov_data_d.csv")
self.mov_mannheim_path = os.path.join(self.raw, "mov_data_m.csv")
self.mov_berlin_starting_dates_path = os.path.join(self.raw, "starting_dates_b.html")
self.mov_dresden_starting_dates_path = os.path.join(self.raw, "starting_dates_d.html")
self.mov_mannheim_starting_dates_path = os.path.join(self.raw, "starting_dates_m.html")
self.alcohol_per_drink_path = os.path.join(self.external,'alcohol_per_drink.csv')
#export
def get_efficiently(func):
"""
This decorator wraps around functions that get data and handles data storage.
If the output from the function hasn't been stored yet, it stores it in "[path_to_interim]/[function_name_without_get].parquet"
If the output from the function has been stored already, it loads the stored file instead of running the function (unless update is specified as True)
"""
def w(*args, update = False, columns = None, path = None, **kw):
_self = args[0] # Getting self to grab interim path from DataProvider
var_name = func.__name__.replace('__get_','').replace('get_','')
file_path = os.path.join(_self.interim, "%s.parquet"%var_name)
if os.path.exists(file_path) and (update == False):
result = pd.read_parquet(file_path, columns = columns)
else:
print("Preparing %s"%var_name)
result = func(_self)
result.to_parquet(file_path)
return result
w.__wrapped__ = func # Specifying the wrapped function for inspection
w.__doc__ = func.__doc__
w.__name__ = func.__name__
w.__annotations__ = {'cls':DataProvider, 'as_prop':False} # Adding parameters to make this work with @patch
return w
# Cell
@patch
def store_interim(self:DataProvider, df, filename):
path = os.path.join(self.interim,"%s.parquet"%filename)
df.to_parquet(path)
# Cell
@patch
def load_interim(self:DataProvider, filename):
return pd.read_parquet(os.path.join(self.interim,"%s.parquet"%filename))
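# Cell
# Illustrative round trip for the interim helpers; the frame contents and the "demo" name
# are made-up values, and dp is assumed to be an existing DataProvider instance.
def _demo_interim_roundtrip(dp):
    df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
    dp.store_interim(df, "demo")
    return dp.load_interim("demo")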
# Cell
@patch
@get_efficiently
def get_phone_codebook(self:DataProvider):
tables = pd.read_html(open(self.phone_codebook_path,'r').read())
df = tables[1]
# Note that str.contains fills NaN values with nan, which can lead to strange results during filtering
df = df[df.LabelHinweistext.str.contains('Fragebogen:',na=False)==False]
df = df.set_index('#')
# Parsing variable name
df['variable'] = df["Variable / Feldname"].apply(lambda x: x.split(' ')[0])
# Parsing condition under which variable is displayed
df['condition'] = df["Variable / Feldname"].apply(lambda x: ' '.join(x.split(' ')[1:]).strip() if len(x.split(' '))>1 else '')
df['condition'] = df.condition.apply(lambda x: x.replace('Zeige das Feld nur wenn: ',''))
# Parsing labels for numerical data
df['labels'] = np.nan
labels = tables[2:-1]
try:
labels = [dict(zip(l[0],l[1])) for l in labels]
except:
        display(labels)
searchfor = ["radio","dropdown","yesno","checkbox"]
with_table = df['Feld Attribute (Feld-Typ, Prüfung, Auswahlen, Verzweigungslogik, Berechnungen, usw.)'].str.contains('|'.join(searchfor))
df.loc[with_table,'labels'] = labels
df = df.astype(str)
return df
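# Cell
# Illustrative usage sketch with a hypothetical data folder: the first call parses the HTML
# codebook and caches it as interim/phone_codebook.parquet, the second call loads the cached
# parquet, and update=True forces a re-parse.
def _demo_codebook_caching(data_folder="/path/to/data"):
    dp = DataProvider(data_folder)
    codebook = dp.get_phone_codebook()             # computed and cached
    codebook = dp.get_phone_codebook()             # loaded from the parquet cache
    codebook = dp.get_phone_codebook(update=True)  # recomputed
    return codebook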
# Cell
@patch
def determine_phone_b07(self:DataProvider, df):
# Some initial fixes
df.loc[df.center=='d','screen_caller'] = df.loc[df.center=='d','screen_caller'].str.lower().str.strip().replace('leo','<NAME>').replace('<NAME>','<NAME>').replace('<NAME>','<NAME>').replace('<NAME>','<NAME>').replace('dorothee','<NAME>')
# Cleaning screener list
dd_screeners = df[(df.center=='d')&(df.screen_caller.isna()==False)].screen_caller.unique()
def clean_screeners(dd_screeners):
dd_screeners = [y for x in dd_screeners for y in x.split('+')]
dd_screeners = [y for x in dd_screeners for y in x.split(',')]
dd_screeners = [y for x in dd_screeners for y in x.split('und')]
dd_screeners = [y.replace('(15.02.21)','') for x in dd_screeners for y in x.split('/')]
dd_screeners = [y.replace(')','').strip().lower() for x in dd_screeners for y in x.split('(')]
dd_screeners = sorted(list(set(dd_screeners)))
return dd_screeners
dd_screeners = clean_screeners(dd_screeners)
b07_screeners = ['<NAME>','<NAME>','<NAME>','<NAME>','borchardt','<NAME>','<NAME>','<NAME>','<NAME>']
s01_screeners = ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', 'alice','<NAME> <NAME>', '<NAME>', '22.10.2021', 'sascha', '03.08.2021', '<NAME>', '<NAME>', '04.08.2021', '<NAME>', 'sacsha', '09.08.2021', 'ml', 'charlotte', '<NAME>', 'shereen', 'test', "<NAME>", 'benedikt']
known_dd_screeners = list(b07_screeners+s01_screeners)
dd_screeners = df[(df.center=='d')&(df.screen_caller.isna()==False)].screen_caller.unique()
# Checking if all Dresden phone screeners are accounted for
    assert df[(df.center=='d')&(df.screen_caller.isna()==False)].screen_caller.str.contains('|'.join(known_dd_screeners)).mean()==1, "Unknown Dresden phone screener: %s"%', '.join(set(clean_screeners(dd_screeners))-set(known_dd_screeners))
# In general, if a screener from a project was involved, it was screened for that project
df['screened_for_b07'] = (df.center=='d') & (df.screen_caller.str.contains('|'.join(b07_screeners)))
df['screened_for_s01'] = (df.center!='d') | (df.screen_caller.str.contains('|'.join(s01_screeners)))
# We also exclude participants screened for C02 in Berlin
df.loc[(df.screen_purpose == 4) & (df.center=='b'), 'screened_for_s01'] = False
# Additionally, we also set it to true if it was specifically set
df.loc[df.screen_site_dd == 1, 'screened_for_s01'] = True
df.loc[df.screen_site_dd == 3, 'screened_for_s01'] = True
df.loc[df.screen_site_dd == 2, 'screened_for_b07'] = True
df.loc[df.screen_site_dd == 3, 'screened_for_b07'] = True
return df
# Cell
@patch
def check_participant_id(self:DataProvider,x):
    '''This function checks whether a participant ID is numeric and does not exceed 20000.'''
if str(x) == x:
if x.isnumeric():
x = float(x)
else:
return False
if x > 20000:
return False
return True
# Cell
@patch
def test_check_participant_id(self:DataProvider):
    failed = self.check_participant_id('test10') == False # Example of a bad participant ID
    passed = self.check_participant_id('100') == True # Example of a good participant ID
return failed and passed
# Cell
@patch
def set_dtypes(self:DataProvider, data, codebook):
    '''This function automatically adjusts the data types of redcap data based on the redcap codebook.'''
    def number_or_nan(x):
        try:
            float(x)
            return x
        except (ValueError, TypeError):
            return np.nan
# Parsing type
codebook['type'] = codebook["Feld Attribute (Feld-Typ, Prüfung, Auswahlen, Verzweigungslogik, Berechnungen, usw.)"].apply(lambda x: x.split(',')[0])
# Descriptives (not in data)
desc_columns = list(codebook[codebook.type.str.contains('descriptive')].variable)
# Datetime
dt_columns = codebook[(codebook.type.isin(['text (datetime_dmy)','text (date_dmy)']))].variable
dt_columns = list(set(data.columns).intersection(dt_columns))
# Numerical
num_columns = []
num_columns += list(codebook[codebook.type.str.contains('calc')].variable)
num_columns += list(codebook[codebook.type.str.contains('checkbox')].variable)
num_columns += list(codebook[codebook.type.str.contains('radio')].variable)
    num_columns += list(codebook[codebook.type.str.contains(r'text \(number')].variable)
num_columns += list(codebook[codebook.type.str.contains('yesno')].variable)
num_columns += list(codebook[codebook.type.str.contains('dropdown')].variable)
num_columns += list(codebook[codebook.type.str.contains('slider')].variable)
num_columns = list(set(data.columns).intersection(num_columns))
# Text
text_columns = []
text_columns += list(codebook[(codebook.type.str.contains('text')) & (~codebook.type.str.contains('date_dmy|datetime_dmy'))].variable)
text_columns += list(codebook[(codebook.type.str.contains('notes'))].variable)
text_columns += list(codebook[(codebook.type.str.contains('file'))].variable)
text_columns = list(set(data.columns).intersection(text_columns))
assert len(set(num_columns).intersection(set(dt_columns)))==0, set(num_columns).intersection(set(dt_columns))
assert len(set(text_columns).intersection(set(dt_columns)))==0, set(text_columns).intersection(set(dt_columns))
for c in num_columns:
data[c].replace("A 'MySQL server has gone away' error was detected. It is possible that there was an actual database issue, but it is more likely that REDCap detected this request as a duplicate and killed it.", np.nan, inplace = True)
try:
data[c] = data[c].astype(float)
        except (ValueError, TypeError):
data[c] = data[c].apply(number_or_nan).astype(float)
print("Values with wrong dtype in %s"%c)
data[text_columns] = data[text_columns].astype(str).replace('nan',np.nan)
for c in dt_columns:
data[c] = pd.to_datetime(data[c])
return data
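# Example of how this is used further below (see get_phone_data / get_ba_data):
#   df = self.set_dtypes(df, self.get_phone_codebook())
# calc/radio/checkbox/yesno/dropdown/slider/text(number) columns become floats,
# date/datetime text fields become datetime64, and remaining text/notes/file fields stay strings.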
# Cell
@patch
@get_efficiently
def get_phone_data(self:DataProvider):
df = pd.read_csv(self.phonescreening_data_path,
na_values = ["A 'MySQL server has gone away' error was detected. It is possible that there was an actual database issue, but it is more likely that REDCap detected this request as a duplicate and killed it."]
)
remove = ['050571', '307493', '345678', '715736', 'Ihloff', 'test',
'test002', 'test003', 'test004', 'test005', 'test01', 'test02',
'test03', 'test0722', 'test1', 'test34', 'test999', 'test2020',
'test20201', 'test345345', 'testt', 'test_10', 'test_11_26',
'test_neu', 'xx956','050262', '050335', '050402', '050416', '051005', '294932', '891752080', '898922719', '898922899', '917702419', '01627712983', 'meow', 'test0022', 'test246', 'test5647', 'test22222', 'test41514', 'testtt', 'test_057', 'tets','898923271', 'test001', 'test006', 'test007', 'test008', 'test11', 'test_23_12', 'test_n','50744', 'test0001a', 'test004', 'test03', 'tets']
df = df[~df.participant_id.astype(str).isin(remove)]
bad_ids = df[~df.participant_id.apply(self.check_participant_id)].participant_id.unique()
assert len(bad_ids)==0, "Bad participant IDs (should be added to remove): %s"%', '.join(["'%s'"%b for b in bad_ids])
self.get_phone_codebook()
df = self.set_dtypes(df, self.get_phone_codebook())
df['participant_id'] = df.participant_id.astype(int)
df['center'] = df.screen_site.replace({1:'b',2:'d',3:'m'})
df['screen_date'] = pd.to_datetime(df['screen_date'], errors = 'coerce')
#display(df[df.screen_caller.isna()])
df = self.determine_phone_b07(df)
return df
# Cell
@patch
@get_efficiently
def get_ba_codebook(self:DataProvider):
tables = pd.read_html(open(self.ba_codebook_path,"r").read())
df = tables[1]
# Note that str.contains fills NaN values with nan, which can lead to strange results during filtering
df = df[df.LabelHinweistext.str.contains('Fragebogen:',na=False)==False]
df = df.set_index('#')
# Parsing variable name
df['variable'] = df["Variable / Feldname"].apply(lambda x: x.split(' ')[0])
# Parsing condition under which variable is displayed
df['condition'] = df["Variable / Feldname"].apply(lambda x: ' '.join(x.split(' ')[1:]).strip() if len(x.split(' '))>1 else '')
df['condition'] = df.condition.apply(lambda x: x.replace('Zeige das Feld nur wenn: ',''))
# Parsing labels for numerical data
df['labels'] = np.nan
labels = tables[2:-1]
try:
labels = [dict(zip(l[0],l[1])) for l in labels]
except:
        display(labels) # fall back to showing the raw label tables for inspection
searchfor = ["radio","dropdown","yesno","checkbox"]
with_table = df['Feld Attribute (Feld-Typ, Prüfung, Auswahlen, Verzweigungslogik, Berechnungen, usw.)'].str.contains('|'.join(searchfor))
df.loc[with_table,'labels'] = labels
df = df.astype(str)
return df
# Cell
@patch
@get_efficiently
def get_ba_data(self:DataProvider):
'''This function reads in baseline data from redcap, filters out pilot data, and creates movisens IDs.'''
df = pd.read_csv(self.ba_data_path)
df['center'] = df.groupby('participant_id').bx_center.transform(lambda x: x.ffill().bfill())
df['center'] = df.center.replace({1:'b',2:'d',3:'m'})
# Creating new movisense IDs (adding center prefix to movisense IDs)
for old_id in ['bx_movisens','bx_movisens_old','bx_movisens_old_2']:
new_id = old_id.replace('bx_','').replace('movisens','mov_id')
df[new_id] = df.groupby('participant_id')[old_id].transform(lambda x: x.ffill().bfill())
df[new_id] = df.center + df[new_id].astype('str').str.strip('0').str.strip('.').apply(lambda x: x.zfill(3))
df[new_id].fillna('nan',inplace = True)
df.loc[df[new_id].str.contains('nan'),new_id] = np.nan
# Removing test participants
remove = ['050744', 'hdfghadgfh', 'LindaEngel', 'test', 'Test001', 'Test001a', 'test0011', 'test0012', 'test0013', 'test0014', 'test0015', 'test002', 'test00229', 'test007', 'test01', 'test012', 'test013', 'test1', 'test2', 'test4', 'test12', 'test999', 'test2021', 'test345345', 'testneu', 'testtest', 'test_0720', 'test_10', 'test_GA', 'Test_JH','test0016','891752080', 'pipingTest', 'test0001', 'test00012', 'test0012a', 'test0015a', 'test0017', 'test10', 'test20212', 'testJohn01', 'test_00213', 'test_00233', 'test_00271', 'test_003', 'test_004', 'test_11_26', 'Test_MS','898922899', 'tesst', 'test0002', 'test0908', 'test092384750398475', 'test43', 'test123', 'test1233', 'test3425', 'test123456', 'test1234567', 'testfu3', 'test_888', 'test_999', 'test_98375983745', 'Test_Übung','050335', 'test003', 'test02', 'test111', 'test1111', 'test1234','test0000', 'test_CH','50744', 'test0001a', 'test004', 'test03', 'tets']
df = df[~df.participant_id.astype(str).isin(remove)]
# Checking participant ids (to find new test participants)
bad_ids = df[~df.participant_id.apply(self.check_participant_id)].participant_id.unique()
assert len(bad_ids)==0, "Bad participant IDs (should be added to remove): %s"%', '.join(["'%s'"%b for b in bad_ids])
    # labeling B07 participants
b07_pps = pd.read_excel(self.b07_participants_path)['Participant ID'].astype(str)
df['is_b07'] = False
df.loc[df.participant_id.isin(b07_pps),'is_b07'] = True
# Setting dtypes based on codebook
df = self.set_dtypes(df, self.get_ba_codebook())
# Creating convenience variables
df['is_female'] = df.screen_gender.replace({1:0,2:1,3:np.nan})
# Filling in missings from baseline
df['is_female'].fillna(df.bx_sozio_gender.replace({1:0,2:1,3:np.nan}), inplace = True)
df['is_female'] = df.groupby('participant_id')['is_female'].transform(lambda x: x.ffill().bfill())
df['is_female'] = df['is_female'].astype(float)
return df
# Cell
@patch
def get_baseline_drinking_data(self:DataProvider):
# Getting relevant data
ba = self.get_ba_data(columns = ['participant_id','redcap_event_name','mov_id','bx_qf_alc_01','bx_qf_alc_02','bx_qf1_sum']).query("redcap_event_name=='erhebungszeitpunkt_arm_1'")
    # Correct one variable for one participant. This participant reported drinking per three months but the data was logged as drinking per week
ba.loc[(ba.participant_id=='11303') & (ba.bx_qf_alc_02==2),'bx_qf_alc_02'] = 1
ba['drinking_days_last_three_month'] = ba['bx_qf_alc_01'].astype(float) * ba['bx_qf_alc_02'].replace({2:12})
ba['drinks_per_drinking_day_last_three_month'] = ba['bx_qf1_sum']
ba['drinks_per_day_last_three_month'] = (ba['drinking_days_last_three_month'] * ba['bx_qf1_sum'])/90
standard_last_three = ba[~ba.drinks_per_day_last_three_month.isnull()][['mov_id','drinks_per_day_last_three_month','drinks_per_drinking_day_last_three_month','drinking_days_last_three_month']]
standard_last_three.columns = ['participant','last_three_month','drinks_per_drinking_day_last_three_month','drinking_days_last_three_month']
standard_last_three = standard_last_three.groupby('participant').first()
return standard_last_three
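# Worked example for the quantity-frequency conversion above (numbers are illustrative):
#   bx_qf_alc_01 = 3 drinking days, bx_qf_alc_02 = 2 (per week) -> 3 * 12 = 36 drinking days in three months
#   bx_qf1_sum   = 4 drinks per drinking day                    -> (36 * 4) / 90 = 1.6 drinks per day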
# Cell
@patch
def get_duplicate_mov_ids(self:DataProvider):
'''This function creates a dictionary mapping old to new movisens IDs.'''
df = self.get_ba_data()
replace_dict_1 = dict(zip(df.mov_id_old, df.mov_id))
replace_dict_2 = dict(zip(df.mov_id_old_2, df.mov_id))
replace_dict = {**replace_dict_1, **replace_dict_2}
    # Drop mapping entries produced by missing old IDs
    replace_dict.pop(np.nan, None)
    replace_dict.pop(None, None)
replace_dict['d033'] = 'd092' # This participant's data is currently missing in redcap, but they did change ID from 33 to 92
return replace_dict
# Cell
@patch
@get_efficiently
def get_mov_data(self:DataProvider):
"""
This function gets Movisense data
    1) We create unique participant IDs (e.g. "b001"; this is necessary as sites use overlapping IDs)
2) We merge double IDs, so participants with two IDs only have one (for this duplicate_ids.csv has to be updated)
3) We remove pilot participants
4) We get starting dates (from the participant overviews in movisense; downloaded as html)
5) We calculate sampling days and end dates based on the starting dates
"""
# Loading raw data
mov_berlin = pd.read_csv(self.mov_berlin_path, sep = ';')
mov_dresden = pd.read_csv(self.mov_dresden_path, sep = ';')
mov_mannheim = pd.read_csv(self.mov_mannheim_path, sep = ';')
# Merging (participant numbers repeat so we add the first letter of location)
mov_berlin['location'] = 'berlin'
mov_dresden['location'] = 'dresden'
mov_mannheim['location'] = 'mannheim'
    df = pd.concat([mov_berlin, mov_dresden, mov_mannheim])
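    # Sketch of step 1 from the docstring -- prefixing the site initial onto the movisens ID.
    # The raw ID column name ('Participant') is an assumption, not taken from the export:
    #   df['participant'] = df['location'].str[0] + df['Participant'].astype(str).str.zfill(3)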
import os
import pytest
import pandas
from pandas import DataFrame, read_csv
import piperoni as hep
from piperoni.operators.transform.featurize.featurizer import (
CustomFeaturizer,
)
"""
This module implements tests for the CustomFeaturizer.
"""
def multiply_column_by_two(df: DataFrame, col_name: str):
"""Test function for CustomFeaturizer
Parameters
----------
df: DataFrame
The input data.
col_name: str
The column to multiply by two.
Returns
-------
DataFrame
The selected column, but multiplied by two.
"""
    result = DataFrame()
    # Return the selected column multiplied by two, as described in the docstring
    result[col_name] = df[col_name] * 2
    return result
import pandas as pd
def read_all_scenes_file() -> list[str]:
with open("resources/all_scenes.txt") as file:
return file.readlines()
def write_actors(actors: list[dict]):
_write_actors("output/data/actors.csv", actors)
def write_transition_actors(transition_actors: list[dict]):
_write_actors("output/data/transition_actors.csv", transition_actors)
def write_spawns(spawns: list[dict]):
_write_actors("output/data/spawns.csv", spawns)
def _write_actors(file_path: str, actors: list[dict]):
    # Write the records as CSV; index=False is assumed here (the positional index is not meaningful)
    pd.DataFrame(actors).to_csv(file_path, index=False)
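# Hypothetical usage sketch (the field names below are illustrative, not taken from the scene data):
#   write_actors([{"scene": "spot00", "actor_id": "0x0010", "pos_x": 0, "pos_y": 0, "pos_z": 0}])
#   write_spawns([{"scene": "spot00", "entrance": 0}])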
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
from amber.plots._plotsV1 import sma
def num_of_val_pos(wd):
managers = [x for x in os.listdir(wd) if x.startswith("manager")]
manager_pos_cnt = {}
for m in managers:
trials = os.listdir(os.path.join(wd, m, "weights"))
pred = pd.read_table(os.path.join(wd, m, "weights", trials[0], "pred.txt"), comment="#")
manager_pos_cnt[m] = pred['obs'].sum()
return manager_pos_cnt
def plot_zs_hist(hist_fp, config_fp, save_prefix, zoom_first_n=None):
zs_hist = pd.read_table(hist_fp, header=None, sep=",")
    configs = pd.read_table(config_fp)
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import pandas as pd
from rdkit import Chem
from rdkit.Chem import BRICS
number_of_generating_structures = 100  # number of chemical structures generated per iteration
number_of_iterations = 10  # number of iterations; (number_of_generating_structures × number_of_iterations) structures are generated in total
dataset = pd.read_csv('molecules.csv', index_col=0)  # load the dataset of seed-structure SMILES
molecules = [Chem.MolFromSmiles(smiles) for smiles in dataset.iloc[:, 0]]
print('Number of seed molecules :', len(molecules))
# convert the seed molecules into fragments
fragments = set()
for molecule in molecules:
fragment = BRICS.BRICSDecompose(molecule, minFragmentSize=1)
fragments.update(fragment)
print('Number of generated fragments :', len(fragments))
# generate chemical structures
generated_structures = []
for iteration in range(number_of_iterations):
print(iteration + 1, '/', number_of_iterations)
generated_structures_all = BRICS.BRICSBuild([Chem.MolFromSmiles(fragment) for fragment in fragments])
for index, generated_structure in enumerate(generated_structures_all):
# print(iteration + 1, '/', number_of_iterations, ', ', index + 1, '/', number_of_generating_structures)
generated_structure.UpdatePropertyCache(True)
generated_structures.append(Chem.MolToSmiles(generated_structure))
if index + 1 >= number_of_generating_structures:
break
generated_structures = list(set(generated_structures))  # remove duplicate structures
generated_structures = pd.DataFrame(generated_structures, columns=['SMILES'])
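# A possible follow-up step (the output filename is an assumption, not from the original script):
#   generated_structures.to_csv('generated_structures.csv')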
### preprocessing
"""
code is taken from
tunguz - Surprise Me 2!
https://www.kaggle.com/tunguz/surprise-me-2/code
"""
import glob, re
import numpy as np
import pandas as pd
from sklearn import *
from datetime import datetime
import matplotlib.pyplot as plt
data = {
'tra': pd.read_csv('../input/air_visit_data.csv'),
'as': pd.read_csv('../input/air_store_info.csv'),
'hs': pd.read_csv('../input/hpg_store_info.csv'),
'ar': pd.read_csv('../input/air_reserve.csv'),
'hr': pd.read_csv('../input/hpg_reserve.csv'),
'id': pd.read_csv('../input/store_id_relation.csv'),
'tes': pd.read_csv('../input/sample_submission.csv'),
'hol': pd.read_csv('../input/date_info.csv').rename(columns={'calendar_date':'visit_date'})
}
data['hr'] = pd.merge(data['hr'], data['id'], how='inner', on=['hpg_store_id'])
for df in ['ar','hr']:
data[df]['visit_datetime'] = pd.to_datetime(data[df]['visit_datetime'])
data[df]['visit_dow'] = data[df]['visit_datetime'].dt.dayofweek
data[df]['visit_datetime'] = data[df]['visit_datetime'].dt.date
data[df]['reserve_datetime'] = pd.to_datetime(data[df]['reserve_datetime'])
data[df]['reserve_datetime'] = data[df]['reserve_datetime'].dt.date
data[df]['reserve_datetime_diff'] = data[df].apply(lambda r: (r['visit_datetime'] - r['reserve_datetime']).days, axis=1)
# Exclude same-week reservations
data[df] = data[df][data[df]['reserve_datetime_diff'] > data[df]['visit_dow']]
tmp1 = data[df].groupby(['air_store_id','visit_datetime'], as_index=False)[['reserve_datetime_diff', 'reserve_visitors']].sum().rename(columns={'visit_datetime':'visit_date', 'reserve_datetime_diff': 'rs1', 'reserve_visitors':'rv1'})
tmp2 = data[df].groupby(['air_store_id','visit_datetime'], as_index=False)[['reserve_datetime_diff', 'reserve_visitors']].mean().rename(columns={'visit_datetime':'visit_date', 'reserve_datetime_diff': 'rs2', 'reserve_visitors':'rv2'})
data[df] = pd.merge(tmp1, tmp2, how='inner', on=['air_store_id','visit_date'])
data['tra']['visit_date'] = pd.to_datetime(data['tra']['visit_date'])
data['tra']['dow'] = data['tra']['visit_date'].dt.dayofweek
data['tra']['doy'] = data['tra']['visit_date'].dt.dayofyear
data['tra']['year'] = data['tra']['visit_date'].dt.year
data['tra']['month'] = data['tra']['visit_date'].dt.month
data['tra']['week'] = data['tra']['visit_date'].dt.week
data['tra']['visit_date'] = data['tra']['visit_date'].dt.date
data['tes']['visit_date'] = data['tes']['id'].map(lambda x: str(x).split('_')[2])
data['tes']['air_store_id'] = data['tes']['id'].map(lambda x: '_'.join(x.split('_')[:2]))
data['tes']['visit_date'] = pd.to_datetime(data['tes']['visit_date'])
data['tes']['dow'] = data['tes']['visit_date'].dt.dayofweek
data['tes']['doy'] = data['tes']['visit_date'].dt.dayofyear
data['tes']['year'] = data['tes']['visit_date'].dt.year
data['tes']['month'] = data['tes']['visit_date'].dt.month
data['tes']['week'] = data['tes']['visit_date'].dt.week
data['tes']['visit_date'] = data['tes']['visit_date'].dt.date
unique_stores = data['tes']['air_store_id'].unique()
stores = pd.concat([pd.DataFrame({'air_store_id': unique_stores, 'dow': [i]*len(unique_stores)}) for i in range(7)], axis=0, ignore_index=True).reset_index(drop=True)
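# `stores` now holds one row per (air_store_id, day-of-week) pair -- seven rows per test store --
# so the per-day-of-week visitor statistics computed below can be merged onto it.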
#sure it can be compressed...
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].min().rename(columns={'visitors':'min_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].mean().rename(columns={'visitors':'mean_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].median().rename(columns={'visitors':'median_visitors'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
#tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].max().rename(columns={'visitors':'max_visitors'})
#stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
tmp = data['tra'].groupby(['air_store_id','dow'], as_index=False)['visitors'].count().rename(columns={'visitors':'count_observations'})
stores = pd.merge(stores, tmp, how='left', on=['air_store_id','dow'])
stores = pd.merge(stores, data['as'], how='left', on=['air_store_id'])
# NEW FEATURES FROM <NAME>
stores['air_genre_name'] = stores['air_genre_name'].map(lambda x: str(str(x).replace('/',' ')))
stores['air_area_name'] = stores['air_area_name'].map(lambda x: str(str(x).replace('-',' ')))
lbl = preprocessing.LabelEncoder()
for i in range(10):
stores['air_genre_name'+str(i)] = lbl.fit_transform(stores['air_genre_name'].map(lambda x: str(str(x).split(' ')[i]) if len(str(x).split(' '))>i else ''))
stores['air_area_name'+str(i)] = lbl.fit_transform(stores['air_area_name'].map(lambda x: str(str(x).split(' ')[i]) if len(str(x).split(' '))>i else ''))
stores['air_genre_name'] = lbl.fit_transform(stores['air_genre_name'])
stores['air_area_name'] = lbl.fit_transform(stores['air_area_name'])
data['hol']['visit_date'] = pd.to_datetime(data['hol']['visit_date'])
data['hol']['day_of_week'] = lbl.fit_transform(data['hol']['day_of_week'])
data['hol']['visit_date'] = data['hol']['visit_date'].dt.date
train = pd.merge(data['tra'], data['hol'], how='left', on=['visit_date'])
test = pd.merge(data['tes'], data['hol'], how='left', on=['visit_date'])
train = pd.merge(train, stores, how='inner', on=['air_store_id','dow'])
test = pd.merge(test, stores, how='left', on=['air_store_id','dow'])
for df in ['ar','hr']:
    train = pd.merge(train, data[df], how='left', on=['air_store_id','visit_date'])
from PyQt5 import QtWidgets as Qtw
from PyQt5 import QtCore as Qtc
from PyQt5 import QtGui as Qtg
from datetime import datetime, timedelta
from bu_data_model import BU366
import sys
import socket
import time
import pandas as pd
from openpyxl.chart import ScatterChart, Reference, Series
class CheckingThread(Qtc.QThread):
answer_thread = Qtc.pyqtSignal(str, list)
running_state = Qtc.pyqtSignal(str)
remaining_time = Qtc.pyqtSignal(str)
error_threads = Qtc.pyqtSignal(str)
running_threads = {}
def __init__(self, threadid_, n366, total_time, polling_interval, poll_rest):
Qtc.QThread.__init__(self)
self.threadid = threadid_
self.name = n366.name
self.n366 = n366
self.total_time = total_time
self.polling_inteval = polling_interval
self.poll_rest = timedelta(seconds=poll_rest)
self.next_poll = datetime.now()
self.end = datetime.now() + timedelta(minutes=total_time)
self.poll_rest_flag = False
if poll_rest > 0:
self.poll_rest_flag = True
def run(self): # we run iterating over time until the test is over
time_ = datetime.now() # get the time now for the loop
self.running_threads[f'{self.name}'] = self # we add the object to the queue to be able to stop it later
while time_ < self.end: # main loop until end time is bigger than current time
self.remaining_time.emit(f'{self.end - time_}') # we update the remaining time of the test via signal
self.running_state.emit('°R') # we update the status to °R via a signal
try: # we check if the conection is active
self.n366.check_active() # here we poll the DN and get the values to the dataframes
except: # try to reconnect
self.running_state.emit('°RC')
while not self.n366.connection_state and datetime.now() < self.end: # while there is no connection we try to reconnect
for tu_name_disconnect in self.n366.tus: # updates display to show the disconnection status
tu_item = self.n366.tus[tu_name_disconnect]
self.answer_thread.emit(tu_name_disconnect, [ # emit list with values
-1, # set local sector
-100, # set RSSI
0, # set SNR
0, # set RXMCS
0, # set TXMCS
0, # set RX PER
0, # set TX PER
0, # set RX MCS DR
0, # set TX MCS DR
tu_item.get_availability(),
tu_item.get_disconnection_counter(),
tu_item.get_disconnection_ldt(),
tu_item.get_disconnection_lds(),
tu_item.get_disconnection_tdt(),
False,
0,
])
self.n366.connect(self.n366.username, self.n366.password, 22, 1)
# mini loop to fill in the disconnection time for each TU. We get disconnection start from object
# and disconnection end at that time
except_disconnection_start = self.n366.disconnection_start
except_disconnection_end = datetime.now()
while except_disconnection_start < except_disconnection_end: # while there is time between both events
for tu_name_reconnect in self.n366.tus: # updates display to show the disconnection status
except_tu_item = self.n366.tus[tu_name_reconnect] # get each TU
# create a record with the disconnection parameters
record = {'Local Sector': 0, 'RSSI': -100, 'SNR': 0, 'MCS-RX': 0, 'MCS-TX': 0,
'MCS-DR-RX': 0, 'MCS-DR-TX': 0, 'Power Index': 0}
                        record_series = pd.Series(record, name=except_disconnection_start)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
            new_columns: The new columns to set for this DataFrame
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
        if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
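    # Example (mirrors the pandas API; the column names are illustrative):
    #   df.groupby("city").sum()
    #   df.groupby(["city", "year"], as_index=False).agg("mean")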
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
other: What to add this this DataFrame.
axis: The axis to apply addition over. Only applicaable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.add,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_or_object_only=True)
new_query_compiler = self._query_compiler.add(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pandas.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop("is_transform", None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(self, "axis", 0)
kwargs.pop("_level", None)
if isinstance(arg, string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
return self._default_to_pandas(pandas.DataFrame.agg, arg, *args, **kwargs)
elif is_list_like(arg) or callable(arg):
return self.apply(arg, axis=_axis, args=args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert (
len([kwarg for kwarg in kwargs if kwarg not in ["axis", "_level"]]) == 0
)
return f
f = getattr(np, func, None)
if f is not None:
return self._default_to_pandas(pandas.DataFrame.agg, func, *args, **kwargs)
raise ValueError("{} is an unknown string function".format(func))
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.align,
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
def all(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.all(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def any(self, axis=0, bool_only=None, skipna=None, level=None, **kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
else:
if bool_only:
raise ValueError("Axis must be 0 or 1 (got {})".format(axis))
return self._query_compiler.any(
axis=axis, bool_only=bool_only, skipna=skipna, level=level, **kwargs
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=None):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pandas.Series, dict)):
if isinstance(other, dict):
other = pandas.Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pandas.Index([other.name], name=self.index.name)
# Create a Modin DataFrame from this Series for ease of development
other = DataFrame(pandas.DataFrame(other).T, index=index)._query_compiler
elif isinstance(other, list):
if not isinstance(other[0], DataFrame):
other = pandas.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = DataFrame(other.loc[:, self.columns])._query_compiler
else:
other = DataFrame(other)._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = self.index.append(other.index)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
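    # Example (mirrors the pandas API; the frames are illustrative):
    #   combined = df.append(other_df, ignore_index=True)
    #   combined = df.append([df2, df3], sort=False)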
def apply(
self, func, axis=0, broadcast=False, raw=False, reduce=None, args=(), **kwds
):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
ErrorMessage.non_verified_udf()
if isinstance(func, string_types):
if axis == 1:
kwds["axis"] = axis
return getattr(self, func)(*args, **kwds)
elif isinstance(func, dict):
if axis == 1:
raise TypeError(
"(\"'dict' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
if len(self.columns) != len(set(self.columns)):
warnings.warn(
"duplicate column names not supported with apply().",
FutureWarning,
stacklevel=2,
)
elif is_list_like(func):
if axis == 1:
raise TypeError(
"(\"'list' object is not callable\", "
"'occurred at index {0}'".format(self.index[0])
)
elif not callable(func):
return
query_compiler = self._query_compiler.apply(func, axis, *args, **kwds)
if isinstance(query_compiler, pandas.Series):
return query_compiler
return DataFrame(query_compiler=query_compiler)
def as_blocks(self, copy=True):
return self._default_to_pandas(pandas.DataFrame.as_blocks, copy=copy)
def as_matrix(self, columns=None):
"""Convert the frame to its Numpy-array representation.
Args:
columns: If None, return all columns, otherwise,
returns specified columns.
Returns:
values: ndarray
"""
# TODO this is very inefficient, also see __array__
return to_pandas(self).as_matrix(columns)
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
return self._default_to_pandas(
pandas.DataFrame.asfreq,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def asof(self, where, subset=None):
return self._default_to_pandas(pandas.DataFrame.asof, where, subset=subset)
def assign(self, **kwargs):
return self._default_to_pandas(pandas.DataFrame.assign, **kwargs)
def astype(self, dtype, copy=True, errors="raise", **kwargs):
col_dtypes = {}
if isinstance(dtype, dict):
if not set(dtype.keys()).issubset(set(self.columns)) and errors == "raise":
raise KeyError(
"Only a column name can be used for the key in"
"a dtype mappings argument."
)
col_dtypes = dtype
else:
for column in self.columns:
col_dtypes[column] = dtype
new_query_compiler = self._query_compiler.astype(col_dtypes, **kwargs)
return self._create_dataframe_from_compiler(new_query_compiler, not copy)
def at_time(self, time, asof=False):
return self._default_to_pandas(pandas.DataFrame.at_time, time, asof=asof)
def between_time(self, start_time, end_time, include_start=True, include_end=True):
return self._default_to_pandas(
pandas.DataFrame.between_time,
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')"""
new_df = self.fillna(
method="bfill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError(
"""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all()."""
)
else:
return to_pandas(self).bool()
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs
)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
# validate inputs
if axis is not None:
axis = pandas.DataFrame()._get_axis_number(axis)
self._validate_dtypes(numeric_only=True)
if is_list_like(lower) or is_list_like(upper):
if axis is None:
raise ValueError("Must specify axis = 0 or 1")
self._validate_other(lower, axis)
self._validate_other(upper, axis)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = numpy_compat.function.validate_clip_with_axis(axis, args, kwargs)
# any np.nan bounds are treated as None
if lower is not None and np.any(np.isnan(lower)):
lower = None
if upper is not None and np.any(np.isnan(upper)):
upper = None
new_query_compiler = self._query_compiler.clip(
lower=lower, upper=upper, axis=axis, inplace=inplace, *args, **kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
return self.clip(lower=threshold, axis=axis, inplace=inplace)
def clip_upper(self, threshold, axis=None, inplace=False):
return self.clip(upper=threshold, axis=axis, inplace=inplace)
def combine(self, other, func, fill_value=None, overwrite=True):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.combine,
other,
func,
fill_value=fill_value,
overwrite=overwrite,
)
def combine_first(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.combine_first, other=other)
def compound(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.compound, axis=axis, skipna=skipna, level=level
)
def consolidate(self, inplace=False):
return self._default_to_pandas(pandas.DataFrame.consolidate, inplace=inplace)
def convert_objects(
self,
convert_dates=True,
convert_numeric=False,
convert_timedeltas=True,
copy=True,
):
return self._default_to_pandas(
pandas.DataFrame.convert_objects,
convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy,
)
def corr(self, method="pearson", min_periods=1):
return self._default_to_pandas(
pandas.DataFrame.corr, method=method, min_periods=min_periods
)
def corrwith(self, other, axis=0, drop=False):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop
)
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.count(
axis=axis, level=level, numeric_only=numeric_only
)
def cov(self, min_periods=None):
return self._default_to_pandas(pandas.DataFrame.cov, min_periods=min_periods)
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummax(
axis=axis, skipna=skipna, **kwargs
)
)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if axis:
self._validate_dtypes()
return DataFrame(
query_compiler=self._query_compiler.cummin(
axis=axis, skipna=skipna, **kwargs
)
)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumprod(
axis=axis, skipna=skipna, **kwargs
)
)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes(numeric_only=True)
return DataFrame(
query_compiler=self._query_compiler.cumsum(
axis=axis, skipna=skipna, **kwargs
)
)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
if include is not None:
if not is_list_like(include):
include = [include]
include = [np.dtype(i) for i in include]
if exclude is not None:
if not is_list_like(include):
exclude = [exclude]
exclude = [np.dtype(e) for e in exclude]
if percentiles is not None:
pandas.DataFrame()._check_percentile(percentiles)
return DataFrame(
query_compiler=self._query_compiler.describe(
percentiles=percentiles, include=include, exclude=exclude
)
)
def diff(self, periods=1, axis=0):
"""Finds the difference between elements on the axis requested
Args:
periods: Periods to shift for forming difference
axis: Take difference over rows or columns
Returns:
DataFrame with the diff applied
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.diff(periods=periods, axis=axis)
)
def div(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.div,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.div(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def divide(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(pandas.DataFrame.dot, other)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
"""
# TODO implement level
if level is not None:
return self._default_to_pandas(
pandas.DataFrame.drop,
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis = pandas.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pandas.DataFrame()._construct_axes_from_arguments(
(index, columns), {}
)
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
# TODO Clean up this error checking
if "index" not in axes:
axes["index"] = None
elif axes["index"] is not None:
if not is_list_like(axes["index"]):
axes["index"] = [axes["index"]]
if errors == "raise":
                non_existent = [obj for obj in axes["index"] if obj not in self.index]
                if len(non_existent):
                    raise ValueError(
                        "labels {} not contained in axis".format(non_existent)
)
else:
axes["index"] = [obj for obj in axes["index"] if obj in self.index]
# If the length is zero, we will just do nothing
if not len(axes["index"]):
axes["index"] = None
if "columns" not in axes:
axes["columns"] = None
elif axes["columns"] is not None:
if not is_list_like(axes["columns"]):
axes["columns"] = [axes["columns"]]
if errors == "raise":
                non_existent = [
                    obj for obj in axes["columns"] if obj not in self.columns
                ]
                if len(non_existent):
                    raise ValueError(
                        "labels {} not contained in axis".format(non_existent)
)
else:
axes["columns"] = [
obj for obj in axes["columns"] if obj in self.columns
]
# If the length is zero, we will just do nothing
if not len(axes["columns"]):
axes["columns"] = None
new_query_compiler = self._query_compiler.drop(
index=axes["index"], columns=axes["columns"]
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
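    # Usage sketch (illustrative only; assumes ``df`` has index labels 0..2 and
    # columns "a" and "b"):
    #
    #     df.drop(columns=["b"])                # drop a column by label
    #     df.drop(index=[0, 2])                 # drop rows by index label
    #     df.drop(labels="b", axis=1)           # equivalent to columns=["b"]
    #     df.drop(index=[5], errors="ignore")   # missing labels are skipped instead of raising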
def drop_duplicates(self, subset=None, keep="first", inplace=False):
return self._default_to_pandas(
pandas.DataFrame.drop_duplicates, subset=subset, keep=keep, inplace=inplace
)
def duplicated(self, subset=None, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.duplicated, subset=subset, keep=keep
)
def eq(self, other, axis="columns", level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.eq, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.eq(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
if isinstance(other, pandas.DataFrame):
# Copy into a Ray DataFrame to simplify logic below
other = DataFrame(other)
if not self.index.equals(other.index) or not self.columns.equals(other.columns):
return False
return all(self.eq(other).all())
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
if isinstance(new_query_compiler, pandas.Series):
return new_query_compiler
else:
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
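    # Usage sketch (illustrative only; assumes ``df`` has numeric columns "a" and "b"):
    #
    #     df.eval("a + b")                    # elementwise sum, returned as a Series
    #     df.eval("c = a * b")                # new DataFrame with an extra column "c"
    #     df.eval("c = a * b", inplace=True)  # mutate ``df`` instead of returning a copy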
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
freq=None,
adjust=True,
ignore_na=False,
axis=0,
):
return self._default_to_pandas(
pandas.DataFrame.ewm,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
freq=freq,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
return self._default_to_pandas(
pandas.DataFrame.expanding,
min_periods=min_periods,
freq=freq,
center=center,
axis=axis,
)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(
method="ffill", axis=axis, limit=limit, downcast=downcast, inplace=inplace
)
if not inplace:
return new_df
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
**kwargs
):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
            method: Method to use for filling holes in reindexed Series.
                pad / ffill: propagate the last valid observation forward to the
                next valid observation.
                backfill / bfill: use the NEXT valid observation to fill the gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
"""
# TODO implement value passed as DataFrame
if isinstance(value, pandas.DataFrame) or isinstance(value, pandas.Series):
new_query_compiler = self._default_to_pandas(
pandas.DataFrame.fillna,
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)._query_compiler
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
inplace = validate_bool_kwarg(inplace, "inplace")
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(value, (list, tuple)):
raise TypeError(
'"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__)
)
if value is None and method is None:
raise ValueError("must specify a fill method or value")
if value is not None and method is not None:
raise ValueError("cannot specify both a fill method and value")
if method is not None and method not in ["backfill", "bfill", "pad", "ffill"]:
expecting = "pad (ffill) or backfill (bfill)"
msg = "Invalid fill method. Expecting {expecting}. Got {method}".format(
expecting=expecting, method=method
)
raise ValueError(msg)
new_query_compiler = self._query_compiler.fillna(
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
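    # Usage sketch (illustrative only; assumes ``df`` contains NaN values):
    #
    #     df.fillna(0)                        # replace every NaN with a scalar
    #     df.fillna({"a": 0, "b": -1})        # per-column fill values via a dict
    #     df.fillna(method="ffill", limit=1)  # propagate the last valid value, at most once
    #     df.fillna(0, inplace=True)          # mutate ``df`` and return None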
def filter(self, items=None, like=None, regex=None, axis=None):
"""Subset rows or columns based on their labels
Args:
items (list): list of labels to subset
like (string): retain labels where `arg in label == True`
regex (string): retain labels matching regex input
axis: axis to filter on
Returns:
A new DataFrame with the filter applied.
"""
nkw = com._count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if nkw == 0:
raise TypeError("Must pass either `items`, `like`, or `regex`")
if axis is None:
axis = "columns" # This is the default info axis for dataframes
axis = pandas.DataFrame()._get_axis_number(axis)
labels = self.columns if axis else self.index
if items is not None:
bool_arr = labels.isin(items)
elif like is not None:
def f(x):
return like in to_str(x)
bool_arr = labels.map(f).tolist()
else:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
bool_arr = labels.map(f).tolist()
if not axis:
return self[bool_arr]
return self[self.columns[bool_arr]]
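    # Usage sketch (illustrative only; assumes ``df`` has columns ["one", "two", "three"]):
    #
    #     df.filter(items=["one", "three"])   # keep exactly these columns
    #     df.filter(like="t")                 # keep columns whose name contains "t"
    #     df.filter(regex="e$")               # keep columns whose name ends with "e"
    #     df.filter(like="2", axis=0)         # apply the same matching to the index instead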
def first(self, offset):
return self._default_to_pandas(pandas.DataFrame.first, offset)
def first_valid_index(self):
"""Return index for first non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.first_valid_index()
def floordiv(self, other, axis="columns", level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.floordiv,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.floordiv(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
@classmethod
def from_csv(
cls,
path,
header=0,
sep=", ",
index_col=0,
parse_dates=True,
encoding=None,
tupleize_cols=None,
infer_datetime_format=False,
):
from .io import read_csv
return read_csv(
path,
header=header,
sep=sep,
index_col=index_col,
parse_dates=parse_dates,
encoding=encoding,
tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format,
)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None):
ErrorMessage.default_to_pandas()
return from_pandas(pandas.DataFrame.from_dict(data, orient=orient, dtype=dtype))
@classmethod
def from_items(cls, items, columns=None, orient="columns"):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_items(items, columns=columns, orient=orient)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
):
ErrorMessage.default_to_pandas()
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ge, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.ge(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
result = self.dtypes.value_counts()
result.index = result.index.map(lambda x: str(x))
return result
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return self.ftypes.value_counts().sort_index()
def get_value(self, index, col, takeable=False):
return self._default_to_pandas(
pandas.DataFrame.get_value, index, col, takeable=takeable
)
def get_values(self):
return self._default_to_pandas(pandas.DataFrame.get_values)
def gt(self, other, axis="columns", level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.gt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.gt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def head(self, n=5):
"""Get the first n rows of the DataFrame.
Args:
n (int): The number of rows to return.
Returns:
A new DataFrame with the first n rows of the DataFrame.
"""
if n >= len(self.index):
return self.copy()
return DataFrame(query_compiler=self._query_compiler.head(n))
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwargs
)
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
raise TypeError("reduction operation 'argmax' not allowed for this dtype")
return self._query_compiler.idxmax(axis=axis, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
if not all(d != np.dtype("O") for d in self.dtypes):
            raise TypeError("reduction operation 'argmin' not allowed for this dtype")
return self._query_compiler.idxmin(axis=axis, skipna=skipna)
def infer_objects(self):
return self._default_to_pandas(pandas.DataFrame.infer_objects)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
"""Print a concise summary of a DataFrame, which includes the index
dtype and column dtypes, non-null values and memory usage.
Args:
verbose (bool, optional): Whether to print the full summary. Defaults
to true
buf (writable buffer): Where to send output. Defaults to sys.stdout
max_cols (int, optional): When to switch from verbose to truncated
                output. By default, this is 100.
            memory_usage (bool, str, optional): Specifies whether the total memory
                usage of the DataFrame elements (including index) should be displayed.
                True always shows memory usage. False never shows memory usage. A value
                of 'deep' is equivalent to "True with deep introspection". Memory usage
                is shown in human-readable units (base-2 representation). Without deep
                introspection a memory estimation is made based on column dtype and
                number of rows, assuming values consume the same memory amount for
                corresponding dtypes. With deep memory introspection, a real memory
                usage calculation is performed at the cost of computational resources.
                Defaults to True.
            null_counts (bool, optional): Whether to show the non-null counts. By
default, this is shown only when the frame is smaller than 100 columns
and 1690785 rows. A value of True always shows the counts and False
never shows the counts.
Returns:
Prints the summary of a DataFrame and returns None.
"""
# We will default to pandas because it will be faster than doing two passes
# over the data
buf = sys.stdout if not buf else buf
import io
with io.StringIO() as tmp_buf:
self._default_to_pandas(
pandas.DataFrame.info,
verbose=verbose,
buf=tmp_buf,
max_cols=max_cols,
memory_usage=memory_usage,
null_counts=null_counts,
)
result = tmp_buf.getvalue()
result = result.replace(
"pandas.core.frame.DataFrame", "modin.pandas.dataframe.DataFrame"
)
buf.write(result)
return None
index = self.index
columns = self.columns
dtypes = self.dtypes
# Set up default values
verbose = True if verbose is None else verbose
buf = sys.stdout if not buf else buf
max_cols = 100 if not max_cols else max_cols
memory_usage = True if memory_usage is None else memory_usage
if not null_counts:
if len(columns) < 100 and len(index) < 1690785:
null_counts = True
else:
null_counts = False
# Determine if actually verbose
actually_verbose = True if verbose and max_cols > len(columns) else False
if type(memory_usage) == str and memory_usage == "deep":
memory_usage_deep = True
else:
memory_usage_deep = False
# Start putting together output
# Class denoted in info() output
class_string = "<class 'modin.pandas.dataframe.DataFrame'>\n"
# Create the Index info() string by parsing self.index
index_string = index.summary() + "\n"
if null_counts:
counts = self._query_compiler.count()
if memory_usage:
memory_usage_data = self._query_compiler.memory_usage(
deep=memory_usage_deep, index=True
)
if actually_verbose:
# Create string for verbose output
col_string = "Data columns (total {0} columns):\n".format(len(columns))
for col, dtype in zip(columns, dtypes):
col_string += "{0}\t".format(col)
if null_counts:
col_string += "{0} not-null ".format(counts[col])
col_string += "{0}\n".format(dtype)
else:
# Create string for not verbose output
col_string = "Columns: {0} entries, {1} to {2}\n".format(
len(columns), columns[0], columns[-1]
)
# A summary of the dtypes in the dataframe
dtypes_string = "dtypes: "
for dtype, count in dtypes.value_counts().iteritems():
dtypes_string += "{0}({1}),".format(dtype, count)
dtypes_string = dtypes_string[:-1] + "\n"
# Create memory usage string
memory_string = ""
if memory_usage:
if memory_usage_deep:
memory_string = "memory usage: {0} bytes".format(memory_usage_data)
else:
memory_string = "memory usage: {0}+ bytes".format(memory_usage_data)
# Combine all the components of the info() output
result = "".join(
[class_string, index_string, col_string, dtypes_string, memory_string]
)
# Write to specified output buffer
buf.write(result)
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
else:
if not is_list_like(value):
value = np.full(len(self.index), value)
if not isinstance(value, pandas.Series) and len(value) != len(self.index):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
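    # Usage sketch (illustrative only; assumes ``df`` has three rows and columns ["a", "b"]):
    #
    #     df.insert(1, "c", [7, 8, 9])   # place a new column "c" between "a" and "b"
    #     df.insert(0, "flag", True)     # scalar values are broadcast down the column
    #     # re-inserting an existing label raises unless allow_duplicates=True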
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
downcast=None,
**kwargs
):
return self._default_to_pandas(
pandas.DataFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
downcast=downcast,
**kwargs
)
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
index_iter = iter(self.index)
def iterrow_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.iterrows()
partition_iterator = PartitionIterator(self._query_compiler, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
col_iter = iter(self.columns)
def items_builder(df):
df.columns = [next(col_iter)]
df.index = self.index
return df.items()
partition_iterator = PartitionIterator(self._query_compiler, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name="Pandas"):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
Generators can't be pickled so from the remote function
we expand the generator into a list before getting it.
This is not that ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
index_iter = iter(self.index)
def itertuples_builder(df):
df.columns = self.columns
df.index = [next(index_iter)]
return df.itertuples(index=index, name=name)
partition_iterator = PartitionIterator(
self._query_compiler, 0, itertuples_builder
)
for v in partition_iterator:
yield v
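    # Usage sketch (illustrative only; assumes ``df`` has columns "a" and "b"):
    #
    #     for row in df.itertuples():
    #         print(row.Index, row.a, row.b)            # one namedtuple per row
    #     for row in df.itertuples(index=False, name=None):
    #         print(row)                                # plain tuples, no index field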
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
"""Join two or more DataFrames, or a DataFrame with a collection.
Args:
other: What to join this DataFrame with.
on: A column name to use from the left for the join.
how: What type of join to conduct.
lsuffix: The suffix to add to column names that match on left.
rsuffix: The suffix to add to column names that match on right.
sort: Whether or not to sort.
Returns:
The joined DataFrame.
"""
if on is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.join,
other,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
if isinstance(other, pandas.Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
pandas.DataFrame(columns=self.columns).join(
pandas.DataFrame(columns=other.columns),
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
other._query_compiler,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
else:
# This constraint carried over from Pandas.
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
# See note above about error checking with an empty join.
pandas.DataFrame(columns=self.columns).join(
[pandas.DataFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix,
rsuffix=rsuffix,
).columns
return DataFrame(
query_compiler=self._query_compiler.join(
[obj._query_compiler for obj in other],
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
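    # Usage sketch (illustrative only; assumes ``left``, ``right`` and ``third`` are
    # modin DataFrames and that ``left`` and ``right`` share some column names):
    #
    #     left.join(right, lsuffix="_l", rsuffix="_r")   # left join on the index
    #     left.join(right, how="inner")                  # keep only shared index labels
    #     left.join([right, third])                      # list input joins on the index only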
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurt,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.kurtosis,
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs
)
def last(self, offset):
return self._default_to_pandas(pandas.DataFrame.last, offset)
def last_valid_index(self):
"""Return index for last non-NA/null value.
Returns:
scalar: type of index
"""
return self._query_compiler.last_valid_index()
def le(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the le over.
level: The Multilevel index level to apply le over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.le, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.le(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def lookup(self, row_labels, col_labels):
return self._default_to_pandas(pandas.DataFrame.lookup, row_labels, col_labels)
def lt(self, other, axis="columns", level=None):
"""Checks element-wise that this is less than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the lt over.
level: The Multilevel index level to apply lt over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.lt, other, axis=axis, level=level
)
other = self._validate_other(other, axis, comparison_dtypes_only=True)
new_query_compiler = self._query_compiler.lt(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mad(self, axis=None, skipna=None, level=None):
return self._default_to_pandas(
pandas.DataFrame.mad, axis=axis, skipna=skipna, level=level
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
raise_on_error=None,
):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mask,
cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
errors=errors,
try_cast=try_cast,
raise_on_error=raise_on_error,
)
def max(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.max(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes mean across the DataFrame.
Args:
axis (int): The axis to take the mean on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The mean of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.mean(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Computes median across the DataFrame.
Args:
axis (int): The axis to take the median on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The median of the DataFrame. (Pandas series)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if numeric_only is not None and not numeric_only:
self._validate_dtypes(numeric_only=True)
return self._query_compiler.median(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
return self._default_to_pandas(
pandas.DataFrame.melt,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
def memory_usage(self, index=True, deep=False):
"""Returns the memory usage of each column in bytes
Args:
index (bool): Whether to include the memory usage of the DataFrame's
index in returned Series. Defaults to True
deep (bool): If True, introspect the data deeply by interrogating
objects dtypes for system-level memory consumption. Defaults to False
Returns:
A Series where the index are the column names and the values are
                the memory usage of each of the columns in bytes. If `index=True`,
then the first value of the Series will be 'Index' with its memory usage.
"""
result = self._query_compiler.memory_usage(index=index, deep=deep)
result.index = self.columns
if index:
index_value = self.index.memory_usage(deep=deep)
return pandas.Series(index_value, index=["Index"]).append(result)
return result
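    # Usage sketch (illustrative only; assumes ``df`` is any modin DataFrame):
    #
    #     df.memory_usage()             # Series with an "Index" entry followed by one entry per column
    #     df.memory_usage(index=False)  # per-column usage only
    #     df.memory_usage(deep=True)    # interrogate object columns for their real footprint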
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
):
"""Database style join, where common columns in "on" are merged.
Args:
right: The DataFrame to merge against.
how: What type of join to use.
on: The common column name(s) to join on. If None, and left_on and
right_on are also None, will default to all commonly named
columns.
left_on: The column(s) on the left to use for the join.
right_on: The column(s) on the right to use for the join.
left_index: Use the index from the left as the join keys.
right_index: Use the index from the right as the join keys.
sort: Sort the join keys lexicographically in the result.
suffixes: Add this suffix to the common names not in the "on".
copy: Does nothing in our implementation
indicator: Adds a column named _merge to the DataFrame with
metadata from the merge about each row.
validate: Checks if merge is a specific type.
Returns:
A merged Dataframe
"""
if not isinstance(right, DataFrame):
raise ValueError(
"can not merge DataFrame with instance of type "
"{}".format(type(right))
)
if left_index is False or right_index is False:
if isinstance(right, DataFrame):
right = right._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.merge,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
if left_index and right_index:
return self.join(
right, how=how, lsuffix=suffixes[0], rsuffix=suffixes[1], sort=sort
)
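    # Usage sketch (illustrative only; assumes ``left`` and ``right`` share the column "key"):
    #
    #     left.merge(right, on="key")                            # inner join on a common column
    #     left.merge(right, how="left", on="key")                # keep every row of ``left``
    #     left.merge(right, left_index=True, right_index=True)   # index-on-index join (delegates to .join)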
def min(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_min_max(axis, numeric_only)
return self._query_compiler.min(
axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs
)
def mod(self, other, axis="columns", level=None, fill_value=None):
"""Mods this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the mod against this.
axis: The axis to mod over.
level: The Multilevel index level to apply mod over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Mod applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mod,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mod(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def mode(self, axis=0, numeric_only=False):
"""Perform mode across the DataFrame.
Args:
axis (int): The axis to take the mode on.
numeric_only (bool): if True, only apply to numeric columns.
Returns:
DataFrame: The mode of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
return DataFrame(
query_compiler=self._query_compiler.mode(
axis=axis, numeric_only=numeric_only
)
)
def mul(self, other, axis="columns", level=None, fill_value=None):
"""Multiplies this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.mul,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.mul(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def multiply(self, other, axis="columns", level=None, fill_value=None):
"""Synonym for mul.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
return self.mul(other, axis, level, fill_value)
def ne(self, other, axis="columns", level=None):
"""Checks element-wise that this is not equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the ne over.
level: The Multilevel index level to apply ne over.
Returns:
A new DataFrame filled with Booleans.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.ne, other, axis=axis, level=level
)
other = self._validate_other(other, axis)
new_query_compiler = self._query_compiler.ne(
other=other, axis=axis, level=level
)
return self._create_dataframe_from_compiler(new_query_compiler)
def nlargest(self, n, columns, keep="first"):
return self._default_to_pandas(pandas.DataFrame.nlargest, n, columns, keep=keep)
def notna(self):
"""Perform notna across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return DataFrame(query_compiler=self._query_compiler.notnull())
def nsmallest(self, n, columns, keep="first"):
return self._default_to_pandas(
pandas.DataFrame.nsmallest, n, columns, keep=keep
)
def nunique(self, axis=0, dropna=True):
"""Return Series with number of distinct
observations over requested axis.
Args:
axis : {0 or 'index', 1 or 'columns'}, default 0
dropna : boolean, default True
Returns:
nunique : Series
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
return self._query_compiler.nunique(axis=axis, dropna=dropna)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs):
return self._default_to_pandas(
pandas.DataFrame.pct_change,
periods=periods,
fill_method=fill_method,
limit=limit,
freq=freq,
**kwargs
)
def pipe(self, func, *args, **kwargs):
"""Apply func(self, *args, **kwargs)
Args:
func: function to apply to the df.
args: positional arguments passed into ``func``.
kwargs: a dictionary of keyword arguments passed into ``func``.
Returns:
object: the return type of ``func``.
"""
return com._pipe(self, func, *args, **kwargs)
def pivot(self, index=None, columns=None, values=None):
return self._default_to_pandas(
pandas.DataFrame.pivot, index=index, columns=columns, values=values
)
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
):
return self._default_to_pandas(
pandas.DataFrame.pivot_table,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
)
@property
def plot(
self,
x=None,
y=None,
kind="line",
ax=None,
subplots=False,
sharex=None,
sharey=False,
layout=None,
figsize=None,
use_index=True,
title=None,
grid=None,
legend=True,
style=None,
logx=False,
logy=False,
loglog=False,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
rot=None,
fontsize=None,
colormap=None,
table=False,
yerr=None,
xerr=None,
secondary_y=False,
sort_columns=False,
**kwargs
):
return to_pandas(self).plot
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
result = self[item]
del self[item]
return result
def pow(self, other, axis="columns", level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
if level is not None:
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.pow,
other,
axis=axis,
level=level,
fill_value=fill_value,
)
other = self._validate_other(other, axis, numeric_only=True)
new_query_compiler = self._query_compiler.pow(
other=other, axis=axis, level=level, fill_value=fill_value
)
return self._create_dataframe_from_compiler(new_query_compiler)
def prod(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
prod : Series or DataFrame (if level specified)
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=True)
return self._query_compiler.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def product(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=1,
**kwargs
):
"""Return the product of the values for the requested axis
Args:
axis : {index (0), columns (1)}
skipna : boolean, default True
level : int or level name, default None
numeric_only : boolean, default None
min_count : int, default 1
Returns:
product : Series or DataFrame (if level specified)
"""
return self.prod(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""Return values at the given quantile over requested axis,
a la numpy.percentile.
Args:
q (float): 0 <= q <= 1, the quantile(s) to compute
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specifies which interpolation method to use
Returns:
quantiles : Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the
values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values
are the quantiles.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
def check_dtype(t):
            return is_numeric_dtype(t)
# -*- coding: utf-8 -*-
"""
Code for interfacing with the Exoplanet Archive catalogs.
"""
from __future__ import division, print_function
import os
import logging
from pkg_resources import resource_filename
import pandas as pd
from six.moves import urllib
from .settings import PEERLESS_DATA_DIR
__all__ = [
"KOICatalog", "KICatalog", "EBCatalog", "BlacklistCatalog",
"TargetCatalog", "DatasetsCatalog", "CumulativeCatalog", "UeharaCatalog",
"WangCatalog",
]
def download():
for c in (KOICatalog, KICatalog):
print("Downloading {0}...".format(c.cls.__name__))
c().fetch(clobber=True)
class Catalog(object):
url = None
name = None
ext = ".h5"
def __init__(self, data_root=None):
self.data_root = PEERLESS_DATA_DIR if data_root is None else data_root
self._df = None
self._spatial = None
@property
def filename(self):
if self.name is None:
raise NotImplementedError("subclasses must provide a name")
return os.path.join(self.data_root, "catalogs", self.name + self.ext)
def fetch(self, clobber=False):
# Check for a local file first.
fn = self.filename
if os.path.exists(fn) and not clobber:
logging.info("Found local file: '{0}'".format(fn))
return
# Fetch the remote file.
if self.url is None:
raise NotImplementedError("subclasses must provide a URL")
url = self.url
logging.info("Downloading file from: '{0}'".format(url))
r = urllib.request.Request(url)
handler = urllib.request.urlopen(r)
code = handler.getcode()
if int(code) != 200:
raise CatalogDownloadError(code, url, "")
# Make sure that the root directory exists.
try:
os.makedirs(os.path.split(fn)[0])
except os.error:
pass
self._save_fetched_file(handler)
def _save_fetched_file(self, file_handle):
raise NotImplementedError("subclasses must implement this method")
@property
def df(self):
if self._df is None:
if not os.path.exists(self.filename):
self.fetch()
self._df = pd.read_hdf(self.filename, self.name)
return self._df
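# Usage sketch (illustrative only; ``KOICatalog`` is one of the concrete catalogs
# exported in ``__all__`` above, and the HDF5 cache lives under PEERLESS_DATA_DIR):
#
#     koi = KOICatalog()
#     koi.fetch()    # no-op if the local file already exists (use clobber=True to refresh)
#     df = koi.df    # lazily loads the cached table as a pandas DataFrame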
class ExoplanetArchiveCatalog(Catalog):
@property
def url(self):
if self.name is None:
raise NotImplementedError("subclasses must provide a name")
return ("http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/"
"nph-nstedAPI?table={0}&select=*").format(self.name)
def _save_fetched_file(self, file_handle):
        df = pd.read_csv(file_handle)
#!/usr/bin/env python3
#Author: <NAME>
#Contact: <EMAIL>
from __future__ import print_function
from . import SigProfilerMatrixGenerator as matGen
import os
import SigProfilerMatrixGenerator as sig
import re
import sys
import pandas as pd
import datetime
from SigProfilerMatrixGenerator.scripts import convert_input_to_simple_files as convertIn
import uuid
import shutil
import time
import numpy as np
import platform
import itertools
import statsmodels
import matplotlib as plt
from pathlib import Path
import sigProfilerPlotting as sigPlt
import scipy
def perm(n, seq):
'''
Generates a list of all available permutations of n-mers.
Parameters:
n -> length of the desired permutation string
seq -> list of all possible string values
Returns:
permus -> list of all available permutations
'''
permus = []
for p in itertools.product(seq, repeat=n):
permus.append("".join(p))
return(permus)
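# Illustrative example (shown as a comment so nothing runs at import time):
# perm(2, "ACGT") returns the 16 dinucleotides ['AA', 'AC', 'AG', 'AT', 'CA', ..., 'TT'],
# and perm(5, "ACGT") yields the 1024 pentamers used below to seed the mutation contexts.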
def SigProfilerMatrixGeneratorFunc (project, genome, vcfFiles, exome=False, bed_file=None, chrom_based=False, plot=False, tsb_stat=False, seqInfo=False, cushion=100, gs=False):
'''
Allows for the import of the sigProfilerMatrixGenerator.py function. Returns a dictionary
with each context serving as the first level of keys.
Parameters:
project -> unique name given to the current samples
genome -> reference genome
vcfFiles -> path where the input vcf files are located.
exome -> flag to use only the exome or not
bed_file -> BED file that contains a list of ranges to be used in generating the matrices
chrom_based -> flag to create the matrices on a per chromosome basis
plot -> flag to generate the plots for each context
tsb_stat -> performs a transcriptional strand bias test for the 24, 384, and 6144 contexts. The output is
saved into the output/TSB directory
gs -> flag that performs a gene strand bias test
Returns:
matrices -> dictionary (nested) of the matrices for each context
example:
matrices = {'96': {'PD1001a':{'A[A>C]A':23,
'A[A>G]A':10,...},
'PD1202a':{'A[A>C]A':23,
'A[A>G]A':10,...},...},
'192':{'PD1001a':{'T:A[A>C]A':23,
'T:A[A>G]A':10,...},
'PD1202a':{'T:A[A>C]A':23,
'T:A[A>G]A':10,...},...},...}
'''
# Instantiates all of the required variables and references
if gs:
print("The Gene Strand Bias is not yet supported! Continuing with the matrix generation.")
gs = False
functionFlag = True
bed = False
bed_ranges = None
limited_indel = True
exome = exome
plot = plot
# Instantiates the final output matrix
matrices = {'96':None, '1536':None, '384':None, '6144':None, 'DINUC':None, '6':None, '24':None, 'INDEL':None}
# Provides a chromosome conversion from NCBI notation
ncbi_chrom = {'NC_000067.6':'1', 'NC_000068.7':'2', 'NC_000069.6':'3', 'NC_000070.6':'4',
'NC_000071.6':'5', 'NC_000072.6':'6', 'NC_000073.6':'7', 'NC_000074.6':'8',
'NC_000075.6':'9', 'NC_000076.6':'10', 'NC_000077.6':'11', 'NC_000078.6':'12',
'NC_000079.6':'13', 'NC_000080.6':'14', 'NC_000081.6':'15', 'NC_000082.6':'16',
'NC_000083.6':'17', 'NC_000084.6':'18', 'NC_000085.6':'19', 'NC_000086.7':'X',
'NC_000087.7':'Y'}
# Provides the reference file conversion from binary to base information
tsb_ref = {0:['N','A'], 1:['N','C'], 2:['N','G'], 3:['N','T'],
4:['T','A'], 5:['T','C'], 6:['T','G'], 7:['T','T'],
8:['U','A'], 9:['U','C'], 10:['U','G'], 11:['U','T'],
12:['B','A'], 13:['B','C'], 14:['B','G'], 15:['B','T'],
16:['N','N'], 17:['T','N'], 18:['U','N'], 19:['B','N']}
bias_sort = {'T':0,'U':1,'N':3,'B':2, 'Q':4}
tsb = ['T','U','N','B']
tsb_I = ['T','U','N','B','Q']
bases = ['A','C','G','T']
mutation_types = ['CC>AA','CC>AG','CC>AT','CC>GA','CC>GG','CC>GT','CC>TA','CC>TG','CC>TT',
'CT>AA','CT>AC','CT>AG','CT>GA','CT>GC','CT>GG','CT>TA','CT>TC','CT>TG',
'TC>AA','TC>AG','TC>AT','TC>CA','TC>CG','TC>CT','TC>GA','TC>GG','TC>GT',
'TT>AA','TT>AC','TT>AG','TT>CA','TT>CC','TT>CG','TT>GA','TT>GC','TT>GG']
mutation_types_non_tsb = ['AC>CA','AC>CG','AC>CT','AC>GA','AC>GG','AC>GT','AC>TA','AC>TG','AC>TT',
'AT>CA','AT>CC','AT>CG','AT>GA','AT>GC','AT>TA',
'CG>AT','CG>GC','CG>GT','CG>TA','CG>TC','CG>TT',
'GC>AA','GC>AG','GC>AT','GC>CA','GC>CG','GC>TA',
'TA>AT','TA>CG','TA>CT','TA>GC','TA>GG','TA>GT',
'TG>AA','TG>AC','TG>AT','TG>CA','TG>CC','TG>CT','TG>GA','TG>GC','TG>GT']
indels_seq_types = [ # Single-sequences
'C', 'T',
# Di-sequences
'AC','AT','CA','CC','CG','CT','GC','TA','TC','TT',
# Tri-sequences
'ACC', 'ACT', 'ATC', 'ATT', 'CAC', 'CAT', 'CCA', 'CCC', 'CCG', 'CCT', 'CGC', 'CGT', 'CTA', 'CTC', 'CTG', 'CTT',
'GCC', 'GCT', 'GTC', 'GTT', 'TAC', 'TAT', 'TCA', 'TCC', 'TCG', 'TCT', 'TGC', 'TGT', 'TTA', 'TTC', 'TTG', 'TTT',
# Tetra-sequences
'AACC', 'AACT', 'AATC', 'AATT', 'ACAC', 'ACAT', 'ACCA', 'ACCC', 'ACCG', 'ACCT', 'ACGC', 'ACGT', 'ACTA', 'ACTC', 'ACTG', 'ACTT', 'AGCC', 'AGCT', 'AGTC',
'AGTT', 'ATAC', 'ATAT', 'ATCA', 'ATCC', 'ATCG', 'ATCT', 'ATGC', 'ATGT', 'ATTA', 'ATTC', 'ATTG', 'ATTT', 'CAAC', 'CAAT', 'CACA', 'CACC', 'CACG', 'CACT',
'CAGC', 'CAGT', 'CATA', 'CATC', 'CATG', 'CATT', 'CCAA', 'CCAC', 'CCAG', 'CCAT', 'CCCA', 'CCCC', 'CCCG', 'CCCT', 'CCGA', 'CCGC', 'CCGG', 'CCGT', 'CCTA',
'CCTC', 'CCTG', 'CCTT', 'CGAC', 'CGAT', 'CGCA', 'CGCC', 'CGCG', 'CGCT', 'CGGC', 'CGTA', 'CGTC', 'CGTG', 'CGTT', 'CTAA', 'CTAC', 'CTAG', 'CTAT', 'CTCA',
'CTCC', 'CTCG', 'CTCT', 'CTGA', 'CTGC', 'CTGG', 'CTGT', 'CTTA', 'CTTC', 'CTTG', 'CTTT', 'GACC', 'GATC', 'GCAC', 'GCCA', 'GCCC', 'GCCG', 'GCCT', 'GCGC',
'GCTA', 'GCTC', 'GCTG', 'GCTT', 'GGCC', 'GGTC', 'GTAC', 'GTCA', 'GTCC', 'GTCG', 'GTCT', 'GTGC', 'GTTA', 'GTTC', 'GTTG', 'GTTT', 'TAAC', 'TACA', 'TACC',
'TACG', 'TACT', 'TAGC', 'TATA', 'TATC', 'TATG', 'TATT', 'TCAA', 'TCAC', 'TCAG', 'TCAT', 'TCCA', 'TCCC', 'TCCG', 'TCCT', 'TCGA', 'TCGC', 'TCGG', 'TCGT',
'TCTA', 'TCTC', 'TCTG', 'TCTT', 'TGAC', 'TGCA', 'TGCC', 'TGCG', 'TGCT', 'TGTA', 'TGTC', 'TGTG', 'TGTT', 'TTAA', 'TTAC', 'TTAG', 'TTAT', 'TTCA', 'TTCC',
'TTCG', 'TTCT', 'TTGA', 'TTGC', 'TTGG', 'TTGT', 'TTTA', 'TTTC', 'TTTG', 'TTTT',
# Penta-sequences
'AACCC', 'AACCT', 'AACTC', 'AACTT', 'AATCC', 'AATCT', 'AATTC', 'AATTT', 'ACACC', 'ACACT', 'ACATC', 'ACATT', 'ACCAC', 'ACCAT', 'ACCCA', 'ACCCC', 'ACCCG',
'ACCCT', 'ACCGC', 'ACCGT', 'ACCTA', 'ACCTC', 'ACCTG', 'ACCTT', 'ACGCC', 'ACGCT', 'ACGTC', 'ACGTT', 'ACTAC', 'ACTAT', 'ACTCA', 'ACTCC', 'ACTCG', 'ACTCT',
'ACTGC', 'ACTGT', 'ACTTA', 'ACTTC', 'ACTTG', 'ACTTT', 'AGCCC', 'AGCCT', 'AGCTC', 'AGCTT', 'AGTCC', 'AGTCT', 'AGTTC', 'AGTTT', 'ATACC', 'ATACT', 'ATATC',
'ATATT', 'ATCAC', 'ATCAT', 'ATCCA', 'ATCCC', 'ATCCG', 'ATCCT', 'ATCGC', 'ATCGT', 'ATCTA', 'ATCTC', 'ATCTG', 'ATCTT', 'ATGCC', 'ATGCT', 'ATGTC', 'ATGTT',
'ATTAC', 'ATTAT', 'ATTCA', 'ATTCC', 'ATTCG', 'ATTCT', 'ATTGC', 'ATTGT', 'ATTTA', 'ATTTC', 'ATTTG', 'ATTTT', 'CAACC', 'CAACT', 'CAATC', 'CAATT', 'CACAC',
'CACAT', 'CACCA', 'CACCC', 'CACCG', 'CACCT', 'CACGC', 'CACGT', 'CACTA', 'CACTC', 'CACTG', 'CACTT', 'CAGCC', 'CAGCT', 'CAGTC', 'CAGTT', 'CATAC', 'CATAT',
'CATCA', 'CATCC', 'CATCG', 'CATCT', 'CATGC', 'CATGT', 'CATTA', 'CATTC', 'CATTG', 'CATTT', 'CCAAC', 'CCAAT', 'CCACA', 'CCACC', 'CCACG', 'CCACT', 'CCAGC',
'CCAGT', 'CCATA', 'CCATC', 'CCATG', 'CCATT', 'CCCAA', 'CCCAC', 'CCCAG', 'CCCAT', 'CCCCA', 'CCCCC', 'CCCCG', 'CCCCT', 'CCCGA', 'CCCGC', 'CCCGG', 'CCCGT',
'CCCTA', 'CCCTC', 'CCCTG', 'CCCTT', 'CCGAC', 'CCGAT', 'CCGCA', 'CCGCC', 'CCGCG', 'CCGCT', 'CCGGC', 'CCGGT', 'CCGTA', 'CCGTC', 'CCGTG', 'CCGTT', 'CCTAA',
'CCTAC', 'CCTAG', 'CCTAT', 'CCTCA', 'CCTCC', 'CCTCG', 'CCTCT', 'CCTGA', 'CCTGC', 'CCTGG', 'CCTGT', 'CCTTA', 'CCTTC', 'CCTTG', 'CCTTT', 'CGACC', 'CGACT',
'CGATC', 'CGATT', 'CGCAC', 'CGCAT', 'CGCCA', 'CGCCC', 'CGCCG', 'CGCCT', 'CGCGC', 'CGCGT', 'CGCTA', 'CGCTC', 'CGCTG', 'CGCTT', 'CGGCC', 'CGGCT', 'CGGTC',
'CGGTT', 'CGTAC', 'CGTAT', 'CGTCA', 'CGTCC', 'CGTCG', 'CGTCT', 'CGTGC', 'CGTGT', 'CGTTA', 'CGTTC', 'CGTTG', 'CGTTT', 'CTAAC', 'CTAAT', 'CTACA', 'CTACC',
'CTACG', 'CTACT', 'CTAGC', 'CTAGT', 'CTATA', 'CTATC', 'CTATG', 'CTATT', 'CTCAA', 'CTCAC', 'CTCAG', 'CTCAT', 'CTCCA', 'CTCCC', 'CTCCG', 'CTCCT', 'CTCGA',
'CTCGC', 'CTCGG', 'CTCGT', 'CTCTA', 'CTCTC', 'CTCTG', 'CTCTT', 'CTGAC', 'CTGAT', 'CTGCA', 'CTGCC', 'CTGCG', 'CTGCT', 'CTGGC', 'CTGGT', 'CTGTA', 'CTGTC',
'CTGTG', 'CTGTT', 'CTTAA', 'CTTAC', 'CTTAG', 'CTTAT', 'CTTCA', 'CTTCC', 'CTTCG', 'CTTCT', 'CTTGA', 'CTTGC', 'CTTGG', 'CTTGT', 'CTTTA', 'CTTTC', 'CTTTG',
'CTTTT', 'GACCC', 'GACCT', 'GACTC', 'GACTT', 'GATCC', 'GATCT', 'GATTC', 'GATTT', 'GCACC', 'GCACT', 'GCATC', 'GCATT', 'GCCAC', 'GCCAT', 'GCCCA', 'GCCCC',
'GCCCG', 'GCCCT', 'GCCGC', 'GCCGT', 'GCCTA', 'GCCTC', 'GCCTG', 'GCCTT', 'GCGCC', 'GCGCT', 'GCGTC', 'GCGTT', 'GCTAC', 'GCTAT', 'GCTCA', 'GCTCC', 'GCTCG',
'GCTCT', 'GCTGC', 'GCTGT', 'GCTTA', 'GCTTC', 'GCTTG', 'GCTTT', 'GGCCC', 'GGCCT', 'GGCTC', 'GGCTT', 'GGTCC', 'GGTCT', 'GGTTC', 'GGTTT', 'GTACC', 'GTACT',
'GTATC', 'GTATT', 'GTCAC', 'GTCAT', 'GTCCA', 'GTCCC', 'GTCCG', 'GTCCT', 'GTCGC', 'GTCGT', 'GTCTA', 'GTCTC', 'GTCTG', 'GTCTT', 'GTGCC', 'GTGCT', 'GTGTC',
'GTGTT', 'GTTAC', 'GTTAT', 'GTTCA', 'GTTCC', 'GTTCG', 'GTTCT', 'GTTGC', 'GTTGT', 'GTTTA', 'GTTTC', 'GTTTG', 'GTTTT', 'TAACC', 'TAACT', 'TAATC', 'TAATT',
'TACAC', 'TACAT', 'TACCA', 'TACCC', 'TACCG', 'TACCT', 'TACGC', 'TACGT', 'TACTA', 'TACTC', 'TACTG', 'TACTT', 'TAGCC', 'TAGCT', 'TAGTC', 'TAGTT', 'TATAC',
'TATAT', 'TATCA', 'TATCC', 'TATCG', 'TATCT', 'TATGC', 'TATGT', 'TATTA', 'TATTC', 'TATTG', 'TATTT', 'TCAAC', 'TCAAT', 'TCACA', 'TCACC', 'TCACG', 'TCACT',
'TCAGC', 'TCAGT', 'TCATA', 'TCATC', 'TCATG', 'TCATT', 'TCCAA', 'TCCAC', 'TCCAG', 'TCCAT', 'TCCCA', 'TCCCC', 'TCCCG', 'TCCCT', 'TCCGA', 'TCCGC', 'TCCGG',
'TCCGT', 'TCCTA', 'TCCTC', 'TCCTG', 'TCCTT', 'TCGAC', 'TCGAT', 'TCGCA', 'TCGCC', 'TCGCG', 'TCGCT', 'TCGGC', 'TCGGT', 'TCGTA', 'TCGTC', 'TCGTG', 'TCGTT',
'TCTAA', 'TCTAC', 'TCTAG', 'TCTAT', 'TCTCA', 'TCTCC', 'TCTCG', 'TCTCT', 'TCTGA', 'TCTGC', 'TCTGG', 'TCTGT', 'TCTTA', 'TCTTC', 'TCTTG', 'TCTTT', 'TGACC',
'TGACT', 'TGATC', 'TGATT', 'TGCAC', 'TGCAT', 'TGCCA', 'TGCCC', 'TGCCG', 'TGCCT', 'TGCGC', 'TGCGT', 'TGCTA', 'TGCTC', 'TGCTG', 'TGCTT', 'TGGCC', 'TGGCT',
'TGGTC', 'TGGTT', 'TGTAC', 'TGTAT', 'TGTCA', 'TGTCC', 'TGTCG', 'TGTCT', 'TGTGC', 'TGTGT', 'TGTTA', 'TGTTC', 'TGTTG', 'TGTTT', 'TTAAC', 'TTAAT', 'TTACA',
'TTACC', 'TTACG', 'TTACT', 'TTAGC', 'TTAGT', 'TTATA', 'TTATC', 'TTATG', 'TTATT', 'TTCAA', 'TTCAC', 'TTCAG', 'TTCAT', 'TTCCA', 'TTCCC', 'TTCCG', 'TTCCT',
'TTCGA', 'TTCGC', 'TTCGG', 'TTCGT', 'TTCTA', 'TTCTC', 'TTCTG', 'TTCTT', 'TTGAC', 'TTGAT', 'TTGCA', 'TTGCC', 'TTGCG', 'TTGCT', 'TTGGC', 'TTGGT', 'TTGTA',
'TTGTC', 'TTGTG', 'TTGTT', 'TTTAA', 'TTTAC', 'TTTAG', 'TTTAT', 'TTTCA', 'TTTCC', 'TTTCG', 'TTTCT', 'TTTGA', 'TTTGC', 'TTTGG', 'TTTGT', 'TTTTA', 'TTTTC',
'TTTTG', 'TTTTT']
# Pre-fills the mutation types variable
size = 5
mut_types_initial = perm(size, "ACGT")
mut_types = []
for tsbs in tsb:
for mut in mut_types_initial:
current_base = mut[int(size/2)]
if current_base == 'C' or current_base == 'T':
for base in bases:
if base != current_base:
mut_types.append(tsbs+":"+mut[0:int(size/2)] + "[" + current_base+">"+ base+"]"+mut[int(size/2)+1:])
# Organizes all of the mutation types for DINUCs
mutation_types_tsb_context = []
for base in bases:
for mut in mutation_types:
for base2 in bases:
for base3 in tsb:
mutation_types_tsb_context.append(''.join([base3,":",base,"[",mut,"]",base2]))
for base in bases:
for mut in mutation_types_non_tsb:
for base2 in bases:
mutation_types_tsb_context.append(''.join(['Q:', base, "[", mut, "]", base2]))
indel_types_tsb = []
indel_types_simple = []
indel_complete = []
indel_cat = ['Del', 'Ins']
indel_types = ['1:Del:C:0', '1:Del:C:1', '1:Del:C:2', '1:Del:C:3', '1:Del:C:4', '1:Del:C:5',
'1:Del:T:0', '1:Del:T:1', '1:Del:T:2', '1:Del:T:3', '1:Del:T:4', '1:Del:T:5',
'1:Ins:C:0', '1:Ins:C:1', '1:Ins:C:2', '1:Ins:C:3', '1:Ins:C:4', '1:Ins:C:5',
'1:Ins:T:0', '1:Ins:T:1', '1:Ins:T:2', '1:Ins:T:3', '1:Ins:T:4', '1:Ins:T:5',
# >1bp INDELS
'2:Del:R:0', '2:Del:R:1', '2:Del:R:2', '2:Del:R:3', '2:Del:R:4', '2:Del:R:5',
'3:Del:R:0', '3:Del:R:1', '3:Del:R:2', '3:Del:R:3', '3:Del:R:4', '3:Del:R:5',
'4:Del:R:0', '4:Del:R:1', '4:Del:R:2', '4:Del:R:3', '4:Del:R:4', '4:Del:R:5',
'5:Del:R:0', '5:Del:R:1', '5:Del:R:2', '5:Del:R:3', '5:Del:R:4', '5:Del:R:5',
'2:Ins:R:0', '2:Ins:R:1', '2:Ins:R:2', '2:Ins:R:3', '2:Ins:R:4', '2:Ins:R:5',
'3:Ins:R:0', '3:Ins:R:1', '3:Ins:R:2', '3:Ins:R:3', '3:Ins:R:4', '3:Ins:R:5',
'4:Ins:R:0', '4:Ins:R:1', '4:Ins:R:2', '4:Ins:R:3', '4:Ins:R:4', '4:Ins:R:5',
'5:Ins:R:0', '5:Ins:R:1', '5:Ins:R:2', '5:Ins:R:3', '5:Ins:R:4', '5:Ins:R:5',
#MicroHomology INDELS
'2:Del:M:1', '3:Del:M:1', '3:Del:M:2', '4:Del:M:1', '4:Del:M:2', '4:Del:M:3',
'5:Del:M:1', '5:Del:M:2', '5:Del:M:3', '5:Del:M:4', '5:Del:M:5', '2:Ins:M:1',
'3:Ins:M:1', '3:Ins:M:2', '4:Ins:M:1', '4:Ins:M:2', '4:Ins:M:3', '5:Ins:M:1',
'5:Ins:M:2', '5:Ins:M:3', '5:Ins:M:4', '5:Ins:M:5', 'complex', 'non_matching']
for indels in indel_types[:-13]:
for tsbs in tsb_I:
indel_types_tsb.append(tsbs + ":" + indels)
for indels in indels_seq_types:
repeat = str(len(indels))
for id_cat in indel_cat:
for l in range(0, 6, 1):
indel_complete.append(":".join([repeat, id_cat, indels, str(l)]))
for id_cat in indel_cat:
for i in range(0, 6, 1):
indel_complete.append(":".join(['5',id_cat, '5',str(i)]))
indel_types_simple = indel_types[:24]
indel_types_simple.append('long_Del')
indel_types_simple.append('long_Ins')
indel_types_simple.append('MH')
indel_types_simple.append('complex')
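    # At this point indel_types_simple holds the 24 single-base-pair Del/Ins
    # classes (1bp C/T deletions and insertions at repeat lengths 0-5) plus the
    # four collapsed categories 'long_Del', 'long_Ins', 'MH' and 'complex',
    # giving 28 categories in total.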
# Instantiates the initial contexts to generate matrices for
contexts = ['6144']
# Organizes all of the reference directories for later reference:
ref_dir, tail = os.path.split(os.path.dirname(os.path.abspath(__file__)))
chrom_path =ref_dir + '/references/chromosomes/tsb/' + genome + "/"
transcript_path = ref_dir + '/references/chromosomes/transcripts/' + genome + "/"
# Terminates the code if the genome reference files have not been created/installed
if not os.path.exists(chrom_path):
print("The specified genome: " + genome + " has not been installed\nRun the following command to install the genome:\n\tpython sigProfilerMatrixGenerator/install.py -g " + genome)
sys.exit()
# Organizes all of the input and output directories:
if vcfFiles[-1] != "/":
vcfFiles += "/"
vcf_path = vcfFiles + "input/"
vcf_path_original = vcf_path
if not os.path.exists(vcf_path) or len(os.listdir(vcf_path)) < 1:
os.makedirs(vcf_path, exist_ok=True)
input_files = os.listdir(vcfFiles)
if os.path.exists(vcfFiles + "input/"):
input_files.remove("input")
if os.path.exists(vcfFiles + "logs/"):
input_files.remove("logs")
if ".DS_Store" in input_files:
input_files.remove(".DS_Store")
if "__init__.py" in input_files:
input_files.remove("__init__.py")
if "__pycache__" in input_files:
input_files.remove("__pycache__")
if os.path.exists(vcfFiles + "output/"):
input_files.remove("output")
for files in input_files:
shutil.copy(vcfFiles + files, vcf_path + files)
output_matrix = vcfFiles + "output/"
if not os.path.exists(output_matrix):
os.makedirs(output_matrix)
# Organizes the error and log files
time_stamp = datetime.date.today()
output_log_path = vcfFiles + "logs/"
if not os.path.exists(output_log_path):
os.makedirs(output_log_path)
error_file = output_log_path + 'SigProfilerMatrixGenerator_' + project + "_" + genome + str(time_stamp) + ".err"
log_file = output_log_path + 'SigProfilerMatrixGenerator_' + project + "_" + genome + str(time_stamp) + ".out"
if os.path.exists(error_file):
os.remove(error_file)
if os.path.exists(log_file):
os.remove(log_file)
sys.stderr = open(error_file, 'w')
log_out = open(log_file, 'w')
log_out.write("THIS FILE CONTAINS THE METADATA ABOUT SYSTEM AND RUNTIME\n\n\n")
log_out.write("-------System Info-------\n")
log_out.write("Operating System Name: "+ platform.uname()[0]+"\n"+"Nodename: "+ platform.uname()[1]+"\n"+"Release: "+ platform.uname()[2]+"\n"+"Version: "+ platform.uname()[3]+"\n")
log_out.write("\n-------Python and Package Versions------- \n")
log_out.write("Python Version: "+str(platform.sys.version_info.major)+"."+str(platform.sys.version_info.minor)+"."+str(platform.sys.version_info.micro)+"\n")
log_out.write("SigProfilerMatrixGenerator Version: "+sig.__version__+"\n")
log_out.write("SigProfilerPlotting version: "+sigPlt.__version__+"\n")
log_out.write("matplotlib version: "+plt.__version__+"\n")
log_out.write("statsmodels version: "+statsmodels.__version__+"\n")
log_out.write("scipy version: "+scipy.__version__+"\n")
log_out.write("pandas version: "+pd.__version__+"\n")
log_out.write("numpy version: "+np.__version__+"\n")
log_out.write("\n-------Vital Parameters Used for the execution -------\n")
log_out.write("Project: {}\nGenome: {}\nInput File Path: {}\nexome: {}\nbed_file: {}\nchrom_based: {}\nplot: {}\ntsb_stat: {}\nseqInfo: {}\n".format(project, genome, vcfFiles, str(exome), str(bed_file), str(chrom_based), str(plot), str(tsb_stat), str(seqInfo)))
log_out.write("\n-------Date and Time Data------- \n")
tic = datetime.datetime.now()
log_out.write("Date and Clock time when the execution started: "+str(tic)+"\n\n\n")
log_out.write("-------Runtime Checkpoints------- \n")
log_out.close()
# Gathers all of the vcf files:
vcf_files_temp = os.listdir(vcf_path)
vcf_files = []
first_extenstion = True
for file in vcf_files_temp:
# Skips hidden files
if file[0:3] == '.DS' or file[0:2] == '__':
pass
else:
vcf_files.append(file)
# Creates a temporary folder for sorting and generating the matrices
file_name = vcf_files[0].split(".")
file_extension = file_name[-1]
unique_folder = project + "_"+ str(uuid.uuid4())
output_path = output_matrix + "temp/" + unique_folder + "/"
if os.path.exists(output_path):
shutil.rmtree(output_path)
os.makedirs(output_path)
skipped_muts = 0
# Converts the input files to standard text in the temporary folder
if file_extension == 'genome':
snv, indel, skipped, samples = convertIn.convertTxt(project, vcf_path, genome, output_path)
else:
if file_extension == 'txt':
snv, indel, skipped, samples = convertIn.convertTxt(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'vcf':
snv, indel, skipped, samples = convertIn.convertVCF(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'maf':
snv, indel, skipped, samples = convertIn.convertMAF(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
elif file_extension == 'tsv':
snv, indel, skipped, samples = convertIn.convertICGC(project, vcf_path, genome, output_path, ncbi_chrom, log_file)
else:
print("File format not supported")
skipped_muts += skipped
# Instantiates variables for final output statistics
analyzed_muts = [0, 0, 0]
sample_count_high = 0
# Begins matrix generation for all possible contexts
for i in range(0, 2, 1):
if i == 0 and snv:
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
output_path_snv = output_path + "SNV/"
vcf_files = os.listdir(output_path_snv)
vcf_path = output_path_snv
print("Starting matrix generation for SNVs and DINUCs...", end='', flush=True)
start = time.time()
# Skips SNVs if none are present
elif i == 0 and not snv:
continue
elif i == 1 and indel:
mutation_ID = {}
mutation_ID['ID'] = pd.DataFrame(0, index=indel_types, columns=samples)
mutation_ID['simple'] = pd.DataFrame(0, index=indel_types_simple, columns=samples)
mutation_ID['tsb'] = pd.DataFrame(0, index=indel_types_tsb, columns=samples)
mutation_ID['complete'] = pd.DataFrame(0, index=indel_complete, columns=samples)
contexts = ['INDEL']
output_path_indel = output_path + "INDEL/"
vcf_files = os.listdir(output_path_indel)
vcf_path = output_path_indel
print("Starting matrix generation for INDELs...", end='', flush=True)
start = time.time()
# Skips INDELs if none are present and deletes the temp folder
elif i ==1 and not indel:
shutil.rmtree(output_matrix + "temp/")
continue
# Removes hidden files generated in macos
if ".DS_Store" in vcf_files:
vcf_files.remove(".DS_Store")
# Generates the bed regions if a bed file was provided
if bed_file != None:
bed = True
bed_file_path = bed_file
bed_ranges = matGen.BED_filtering(bed_file_path)
else:
bed_file_path = None
# Sorts files based on chromosome, sample, and start position
if not chrom_based:
chrom_start = None
if i != 1:
for file in vcf_files:
chrom = file.split("_")[0]
with open(vcf_path + file) as f:
lines = [line.strip().split() for line in f]
lines = sorted(lines, key = lambda x: (x[0], int(x[2])))
context = '6144'
mutation_pd, skipped_mut, total, total_DINUC, mutation_dinuc_pd_all = matGen.catalogue_generator_single (lines, chrom, mutation_pd, mutation_dinuc_pd_all, mutation_types_tsb_context, vcf_path, vcf_path_original, vcf_files, bed_file_path, chrom_path, project, output_matrix, context, exome, genome, ncbi_chrom, functionFlag, bed, bed_ranges, chrom_based, plot, tsb_ref, transcript_path, tsb_stat, seqInfo, gs, log_file)
if chrom_based and not exome and not bed:
matrices = matGen.matrix_generator (context, output_matrix, project, samples, bias_sort, mutation_pd, exome, mut_types, bed, chrom, functionFlag, plot, tsb_stat)
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
dinuc_mat = matGen.matrix_generator_DINUC (output_matrix, samples, bias_sort, mutation_dinuc_pd_all, mutation_types_tsb_context, project, exome, bed, chrom, plot)
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
skipped_muts += skipped_mut
analyzed_muts[0] += total
analyzed_muts[1] += total_DINUC
sample_count_high = len(samples)
if exome:
with open(vcf_path + "exome_temp.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "exome_temp.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
# mutation_pd['6144'], samples2 = matGen.exome_check(mutation_pd['6144'], genome, vcf_path + "exome_temp.txt", output_matrix, project, "SNV", cushion)
mutation_pd['6144'], samples2 = matGen.exome_check(chrom_based, samples, bias_sort, exome, mut_types, bed, chrom, functionFlag, plot, tsb_stat, mutation_pd['6144'], genome, vcf_path + "exome_temp.txt", output_matrix, project, "SNV", cushion)
if bed:
with open(vcf_path + "bed_temp.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "bed_temp.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_pd = {}
mutation_pd['6144'] = pd.DataFrame(0, index=mut_types, columns=samples)
mutation_pd['6144'], samples2 = matGen.panel_check(chrom_based, samples, bias_sort, exome, mut_types, bed, chrom, functionFlag, plot, tsb_stat, mutation_pd['6144'], genome, vcf_path + "bed_temp.txt", output_matrix, bed_file_path, project, "SNV", cushion)
if not chrom_based:
if not mutation_pd['6144'].empty:
matrices = matGen.matrix_generator (context, output_matrix, project, samples, bias_sort, mutation_pd, exome, mut_types, bed, chrom_start, functionFlag, plot, tsb_stat)
if analyzed_muts[1] > 0:
if exome:
with open(vcf_path + "exome_temp_context_tsb_DINUC.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "exome_temp_context_tsb_DINUC.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
mutation_dinuc_pd_all, samples2 = matGen.exome_check(chrom_based, samples, bias_sort, exome, mutation_types_tsb_context, bed, chrom, functionFlag, plot, tsb_stat, mutation_dinuc_pd_all, genome, vcf_path + "exome_temp_context_tsb_DINUC.txt", output_matrix, project, "DBS", cushion)
if bed:
with open(vcf_path + "bed_temp_context_tsb_DINUC.txt") as f:
lines = [line.strip().split() for line in f]
output = open(vcf_path + "bed_temp_context_tsb_DINUC.txt", 'w')
for line in sorted(lines, key = lambda x: (['I','II','III','IV','V','chrI','chrII','chrIII','chrIV','chrV','X','Y','1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23','24',
'25','26','27','28','29','30','31','32','33','34','35','36','37','38','39', 'MT', 'M', 'MtDNA'].index(x[1]), int(x[2]))):
print('\t'.join(line), file=output)
output.close()
mutation_dinuc_pd_all = pd.DataFrame(0, index=mutation_types_tsb_context, columns=samples)
mutation_dinuc_pd_all, samples2 = matGen.panel_check(chrom_based, samples, bias_sort, exome, mutation_types_tsb_context, bed, chrom, functionFlag, plot, tsb_stat, mutation_dinuc_pd_all, genome, vcf_path + "bed_temp_context_tsb_DINUC.txt", output_matrix, bed_file_path, project, "DBS", cushion)
if not chrom_based:
if not mutation_dinuc_pd_all.empty:
dinuc_mat = matGen.matrix_generator_DINUC (output_matrix, samples, bias_sort, mutation_dinuc_pd_all, mutation_types_tsb_context, project, exome, bed, chrom_start, plot)
matrices['DINUC'] = dinuc_mat
else:
for file in vcf_files:
chrom = file.split("_")[0]
with open(vcf_path + file) as f:
lines = [line.strip().split() for line in f]
lines = sorted(lines, key = lambda x: (x[0], int(x[2])))
mutation_ID, skipped_mut, total = matGen.catalogue_generator_INDEL_single (mutation_ID, lines, chrom, vcf_path, vcf_path_original, vcf_files, bed_file_path, chrom_path, project, output_matrix, exome, genome, ncbi_chrom, limited_indel, functionFlag, bed, bed_ranges, chrom_based, plot, tsb_ref, transcript_path, seqInfo, gs, log_file)
if chrom_based and not exome and not bed:
matGen.matrix_generator_INDEL(output_matrix, samples, indel_types, indel_types_tsb, indel_types_simple, mutation_ID['ID'], mutation_ID['tsb'], mutation_ID['simple'], mutation_ID['complete'], project, exome, limited_indel, bed, chrom, plot)
mutation_ID['ID'] = pd.DataFrame(0, index=indel_types, columns=samples)
mutation_ID['simple'] = | pd.DataFrame(0, index=indel_types_simple, columns=samples) | pandas.DataFrame |
#!/usr/bin/python3
import json
import requests
from requests import urllib3
import time
import pprint as pp
import csv
import pandas as pd
from docx import Document
from docx.shared import Inches, Pt
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.enum.text import WD_ALIGN_PARAGRAPH
from ..models import MerakiInfo
import os
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def http_get(meraki_url):
base_url = ('https://api.meraki.com/api/v0/{}'.format(meraki_url))
for api in MerakiInfo.objects.all():
api_key = api.api_key
headers = {'X-Cisco-Meraki-API-Key': api_key}
try:
get_response = requests.get(base_url, headers=headers, verify=False)
status = get_response.status_code
get_response = get_response.json()
time.sleep(0.5)
except:
print('Meraki Cloud not reachable - check connection')
get_response = 'unreachable'
status = 'unreachable'
return get_response, status
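# Illustrative usage sketch (not executed here; assumes at least one API key has
# been saved through the MerakiInfo model):
#   orgs, status = http_get('organizations')
#   if status == 200:
#       print(orgs[0]['name'])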
def create_word_doc_title():
doc = Document('media/Meraki As Built.docx')
doc.add_page_break()
return doc
def ins_word_doc_image(doc, pic_dir, pic_width=5.25):
doc.add_picture(pic_dir, width=Inches(pic_width))
last_paragraph = doc.paragraphs[-1]
last_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
doc.add_paragraph()
return doc
def create_word_doc_paragraph(doc, heading_text = '', heading_level = 1,
paragraph_text = ''):
doc.add_heading(heading_text, level=heading_level)
p = doc.add_paragraph(paragraph_text)
return doc
def create_word_doc_text(paragraph_text, doc):
p = doc.add_paragraph(paragraph_text)
return doc
def create_word_doc_bullet(doc, bp1='', bp2='', bp3='', bp4='', bp5=''):
    # Adds each non-empty bullet point to the document as a list paragraph
    for bullet in (bp1, bp2, bp3, bp4, bp5):
        if bullet != '':
            doc.add_paragraph(bullet, style='List Paragraph')
    return doc
def create_word_doc_table(doc, df):
# add a table to the end and create a reference variable
# extra row is so we can add the header row
#if isinstance(df, pd.DataFrame) and df.empty == False:
try:
t = doc.add_table(df.shape[0]+1, df.shape[1], style = 'Grid Table 4 Accent 6')
# add the header rows.
for j in range(df.shape[-1]):
t.cell(0,j).text = df.columns[j]
# add the rest of the data frame
for i in range(df.shape[0]):
for j in range(df.shape[-1]):
t.cell(i+1,j).text = str(df.values[i,j])
except:
doc = doc
print('Unable to add table')
doc.add_page_break()
return doc
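# Illustrative usage sketch (not executed here; the column names are made up):
#   doc = create_word_doc_title()
#   df = pd.DataFrame({'Network': ['HQ', 'Branch'], 'Devices': [12, 4]})
#   doc = create_word_doc_table(doc, df)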
def save_word_document(doc, customer):
doc.save('media/tmp/{}-AS_Built.docx'.format(customer))
def get_network_info(input_dict, append_url = '', dict_key ='', list=True):
for network in input_dict:
try:
data, status_code = http_get(meraki_url='networks/{}/{}'.format(network["id"],
append_url))
if list == True:
for i in data:
if 'errors' not in i:
print(input_dict[dict_key])
input_dict[dict_key].append(i)
else:
print("error found")
elif list == False:
input_dict[dict_key].append(data)
except:
print('{} not reachable'.format(append_url))
def pull_data(meraki_url='organizations'):
# Intitialise dictionary to store returned data
data, status_code = http_get(meraki_url)
return data, status_code
def get_org_info(dn='networks'):
data = []
orgs, status_code = pull_data(meraki_url='organizations')
try:
data_df = pd.DataFrame.from_dict(orgs)
except:
data_df = pd.DataFrame.from_dict(orgs, orient='index')
for org in orgs:
# Get networks from all orgs and add to dictionary
org_data, status_code = pull_data(
meraki_url='organizations/{}/{}'.format(org["id"], dn))
log = ('organizations/{}/{} - Returned status code: {} '.format(
org["id"], dn, status_code))
print(log)
for i in org_data:
data.append(i)
if len(org_data) > 0:
try:
out_df = pd.DataFrame.from_dict(org_data)
except:
out_df = | pd.DataFrame.from_dict(org_data, orient='index') | pandas.DataFrame.from_dict |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 17:28:04 2020
@author: shlomi
"""
from PW_paths import work_yuval
from matplotlib import rcParams
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from PW_paths import savefig_path
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from PW_stations import produce_geo_gnss_solved_stations
tela_results_path = work_yuval / 'GNSS_stations/tela/rinex/30hr/results'
tela_solutions = work_yuval / 'GNSS_stations/tela/gipsyx_solutions'
sound_path = work_yuval / 'sounding'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
dem_path = work_yuval / 'AW3D30'
era5_path = work_yuval / 'ERA5'
hydro_path = work_yuval / 'hydro'
ceil_path = work_yuval / 'ceilometers'
aero_path = work_yuval / 'AERONET'
climate_path = work_yuval / 'climate'
df_gnss = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
st_order_climate = [x for x in df_gnss.dropna().sort_values(
['groups_climate', 'lat', 'lon'], ascending=[1, 0, 0]).index]
rc = {
'font.family': 'serif',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large'}
for key, val in rc.items():
rcParams[key] = val
# sns.set(rc=rc, style='white')
seasonal_colors = {'DJF': 'tab:blue',
'SON': 'tab:red',
'JJA': 'tab:green',
'MAM': 'tab:orange',
'Annual': 'tab:purple'}
def get_twin(ax, axis):
assert axis in ("x", "y")
siblings = getattr(ax, f"get_shared_{axis}_axes")().get_siblings(ax)
for sibling in siblings:
if sibling.bbox.bounds == ax.bbox.bounds and sibling is not ax:
return sibling
return None
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
"""
Returns a string representation of the scientific
notation of the given number formatted for use with
LaTeX or Mathtext, with specified number of significant
decimal digits and precision (number of decimal digits
to show). The exponent to be used can also be specified
explicitly.
"""
from math import floor, log10
if exponent is None:
exponent = int(floor(log10(abs(num))))
coeff = round(num / float(10**exponent), decimal_digits)
if precision is None:
precision = decimal_digits
return r"${0:.{2}f}\cdot10^{{{1:d}}}$".format(coeff, exponent, precision)
def utm_from_lon(lon):
"""
utm_from_lon - UTM zone for a longitude
Not right for some polar regions (Norway, Svalbard, Antartica)
:param float lon: longitude
:return: UTM zone number
:rtype: int
"""
from math import floor
return floor((lon + 180) / 6) + 1
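# Example (illustrative): utm_from_lon(35.0) returns 36, the UTM zone that
# covers most of Israel.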
def scale_bar(ax, proj, length, location=(0.5, 0.05), linewidth=3,
units='km', m_per_unit=1000, bounds=None):
"""
http://stackoverflow.com/a/35705477/1072212
ax is the axes to draw the scalebar on.
proj is the projection the axes are in
location is center of the scalebar in axis coordinates ie. 0.5 is the middle of the plot
length is the length of the scalebar in km.
linewidth is the thickness of the scalebar.
units is the name of the unit
m_per_unit is the number of meters in a unit
"""
import cartopy.crs as ccrs
from matplotlib import patheffects
# find lat/lon center to find best UTM zone
try:
x0, x1, y0, y1 = ax.get_extent(proj.as_geodetic())
except AttributeError:
if bounds is not None:
x0, x1, y0, y1 = bounds
# Projection in metres
utm = ccrs.UTM(utm_from_lon((x0+x1)/2))
# Get the extent of the plotted area in coordinates in metres
x0, x1, y0, y1 = ax.get_extent(utm)
# Turn the specified scalebar location into coordinates in metres
sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1]
# Generate the x coordinate for the ends of the scalebar
bar_xs = [sbcx - length * m_per_unit/2, sbcx + length * m_per_unit/2]
# buffer for scalebar
buffer = [patheffects.withStroke(linewidth=5, foreground="w")]
# Plot the scalebar with buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, path_effects=buffer)
# buffer for text
buffer = [patheffects.withStroke(linewidth=3, foreground="w")]
# Plot the scalebar label
t0 = ax.text(sbcx, sbcy, str(length) + ' ' + units, transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
left = x0+(x1-x0)*0.05
# Plot the N arrow
t1 = ax.text(left, sbcy, u'\u25B2\nN', transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
# Plot the scalebar without buffer, in case covered by text buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, zorder=3)
return
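# Illustrative usage sketch (not executed here; assumes a cartopy GeoAxes):
#   import cartopy.crs as ccrs
#   fig, ax = plt.subplots(subplot_kw={'projection': ccrs.PlateCarree()})
#   ax.set_extent([34.0, 36.5, 29.0, 34.0])
#   scale_bar(ax, ccrs.PlateCarree(), 50)  # draw a 50 km scale bar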
@ticker.FuncFormatter
def lon_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$W'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$E'.format(abs(x))
elif x == 0:
return r'0$\degree$'
@ticker.FuncFormatter
def lat_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$S'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$N'.format(abs(x))
elif x == 0:
return r'0$\degree$'
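# Illustrative usage sketch: attach the formatters to a map axes so the tick
# labels read e.g. '35.0°E' and '32.0°N':
#   ax.xaxis.set_major_formatter(lon_formatter)
#   ax.yaxis.set_major_formatter(lat_formatter)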
def align_yaxis_np(ax1, ax2):
"""Align zeros of the two axes, zooming them out by same ratio"""
import numpy as np
axes = np.array([ax1, ax2])
extrema = np.array([ax.get_ylim() for ax in axes])
tops = extrema[:,1] / (extrema[:,1] - extrema[:,0])
# Ensure that plots (intervals) are ordered bottom to top:
if tops[0] > tops[1]:
axes, extrema, tops = [a[::-1] for a in (axes, extrema, tops)]
# How much would the plot overflow if we kept current zoom levels?
tot_span = tops[1] + 1 - tops[0]
extrema[0,1] = extrema[0,0] + tot_span * (extrema[0,1] - extrema[0,0])
extrema[1,0] = extrema[1,1] + tot_span * (extrema[1,0] - extrema[1,1])
[axes[i].set_ylim(*extrema[i]) for i in range(2)]
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# miny, maxy = ax2.get_ylim()
# ax2.set_ylim(miny+dy, maxy+dy)
def get_legend_labels_handles_title_seaborn_histplot(ax):
old_legend = ax.legend_
handles = old_legend.legendHandles
labels = [t.get_text() for t in old_legend.get_texts()]
title = old_legend.get_title().get_text()
return handles, labels, title
def alignYaxes(axes, align_values=None):
'''Align the ticks of multiple y axes
Args:
axes (list): list of axes objects whose yaxis ticks are to be aligned.
Keyword Args:
align_values (None or list/tuple): if not None, should be a list/tuple
of floats with same length as <axes>. Values in <align_values>
define where the corresponding axes should be aligned up. E.g.
[0, 100, -22.5] means the 0 in axes[0], 100 in axes[1] and -22.5
in axes[2] would be aligned up. If None, align (approximately)
the lowest ticks in all axes.
Returns:
new_ticks (list): a list of new ticks for each axis in <axes>.
        A new set of ticks is computed for each axis in <axes>, all with equal
        length.
'''
from matplotlib.pyplot import MaxNLocator
import numpy as np
nax = len(axes)
ticks = [aii.get_yticks() for aii in axes]
if align_values is None:
aligns = [ticks[ii][0] for ii in range(nax)]
else:
if len(align_values) != nax:
raise Exception(
"Length of <axes> doesn't equal that of <align_values>.")
aligns = align_values
bounds = [aii.get_ylim() for aii in axes]
# align at some points
ticks_align = [ticks[ii]-aligns[ii] for ii in range(nax)]
# scale the range to 1-100
ranges = [tii[-1]-tii[0] for tii in ticks]
lgs = [-np.log10(rii)+2. for rii in ranges]
igs = [np.floor(ii) for ii in lgs]
log_ticks = [ticks_align[ii]*(10.**igs[ii]) for ii in range(nax)]
# put all axes ticks into a single array, then compute new ticks for all
comb_ticks = np.concatenate(log_ticks)
comb_ticks.sort()
locator = MaxNLocator(nbins='auto', steps=[1, 2, 2.5, 3, 4, 5, 8, 10])
new_ticks = locator.tick_values(comb_ticks[0], comb_ticks[-1])
new_ticks = [new_ticks/10.**igs[ii] for ii in range(nax)]
new_ticks = [new_ticks[ii]+aligns[ii] for ii in range(nax)]
# find the lower bound
idx_l = 0
for i in range(len(new_ticks[0])):
if any([new_ticks[jj][i] > bounds[jj][0] for jj in range(nax)]):
idx_l = i-1
break
# find the upper bound
idx_r = 0
for i in range(len(new_ticks[0])):
if all([new_ticks[jj][i] > bounds[jj][1] for jj in range(nax)]):
idx_r = i
break
# trim tick lists by bounds
new_ticks = [tii[idx_l:idx_r+1] for tii in new_ticks]
# set ticks for each axis
for axii, tii in zip(axes, new_ticks):
axii.set_yticks(tii)
return new_ticks
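# Illustrative usage sketch (not executed here): align the zero lines of a
# twinx pair so both series share a common baseline:
#   fig, ax1 = plt.subplots()
#   ax2 = ax1.twinx()
#   ax1.plot(range(10), [x - 3 for x in range(10)])
#   ax2.plot(range(10), [100 * x for x in range(10)], color='r')
#   alignYaxes([ax1, ax2], align_values=[0, 0])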
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1 - y2) / 2, v2)
adjust_yaxis(ax1, (y2 - y1) / 2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny * (maxy + dy) / (miny + dy)
else:
nmaxy = maxy
nminy = maxy * (miny + dy) / (maxy + dy)
ax.set_ylim(nminy + v, nmaxy + v)
def qualitative_cmap(n=2):
import matplotlib.colors as mcolors
if n == 2:
colorsList = [mcolors.BASE_COLORS['r'], mcolors.BASE_COLORS['g']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 4:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 5:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m'],
mcolors.BASE_COLORS['b']]
cmap = mcolors.ListedColormap(colorsList)
return cmap
def caption(text, color='blue', **kwargs):
from termcolor import colored
print(colored('Caption:', color, attrs=['bold'], **kwargs))
print(colored(text, color, attrs=['bold'], **kwargs))
return
def adjust_lightness(color, amount=0.5):
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
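# Example (illustrative): adjust_lightness('tab:blue', 0.5) returns a darker
# shade of the same hue as an RGB tuple; amounts above 1 lighten instead.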
def produce_colors_for_pwv_station(scope='annual', zebra=False,
as_dict=False, as_cat_dict=False):
import pandas as pd
stns = group_sites_to_xarray(scope=scope)
cdict = {'coastal': 'tab:blue',
'highland': 'tab:green',
'eastern': 'tab:orange'}
if as_cat_dict:
return cdict
# for grp, color in cdict.copy().items():
# cdict[grp] = to_rgba(get_named_colors_mapping()[
# color], alpha=1)
ds = stns.to_dataset('group')
colors = []
for group in ds:
sts = ds[group].dropna('GNSS').values
for i, st in enumerate(sts):
color = cdict.get(group)
if zebra:
if i % 2 != 0:
# rgba = np.array(rgba)
# rgba[-1] = 0.5
color = adjust_lightness(color, 0.5)
colors.append(color)
# colors = [item for sublist in colors for item in sublist]
stns = stns.T.values.ravel()
stns = stns[~pd.isnull(stns)]
if as_dict:
colors = dict(zip(stns, colors))
return colors
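# Illustrative usage sketch (not executed here):
#   colors = produce_colors_for_pwv_station(scope='annual', as_dict=True)
#   # maps each GNSS site to its group color, e.g. coastal sites to 'tab:blue'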
def fix_time_axis_ticks(ax, limits=None, margin=15):
import pandas as pd
import matplotlib.dates as mdates
if limits is not None:
ax.set_xlim(*pd.to_datetime(limits))
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(mdates.YearLocator())
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(mdates.MonthLocator())
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
return ax
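# Illustrative usage sketch (not executed here):
#   ax = fix_time_axis_ticks(ax, limits=['1997-01-01', '2020-01-01'])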
def plot_qflux_climatotlogy_israel(path=era5_path, save=True, reduce='mean',
plot_type='uv'):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
ds = xr.load_dataset(path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
if plot_type == 'uv':
f1 = ds['q'] * ds['u']
f2 = ds['q'] * ds['v']
elif plot_type == 'md':
qu = ds['q'] * ds['u']
qv = ds['q'] * ds['v']
f1 = np.sqrt(qu**2 + qv**2)
f2 = np.rad2deg(np.arctan2(qv, qu))
if reduce == 'mean':
f1_clim = f1.groupby('time.month').mean().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').mean().mean(
'longitude').mean('latitude')
center = 0
cmap = 'bwr'
elif reduce == 'std':
f1_clim = f1.groupby('time.month').std().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').std().mean(
'longitude').mean('latitude')
center = None
cmap = 'viridis'
ds_clim = xr.concat([f1_clim, f2_clim], 'direction')
ds_clim['direction'] = ['zonal', 'meridional']
if plot_type == 'md':
fg, axes = plt.subplots(1, 2, figsize=(14, 7))
f1_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[0])
f2_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[1])
else:
fg = ds_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(
levels=41,
yincrease=False,
cmap=cmap,
center=center,
col='direction',
figsize=(
15,
6))
fg.fig.suptitle('Moisture flux climatology over Israel')
# fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# qu_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[0], cmap='bwr', center=0)
# qv_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[1], cmap='bwr', center=0)
fg.fig.subplots_adjust(top=0.923,
bottom=0.102,
left=0.058,
right=0.818,
hspace=0.2,
wspace=0.045)
if save:
filename = 'moisture_clim_from_ERA5_over_israel.png'
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fg
def plot_mean_std_count(da_ts, time_reduce='hour', reduce='mean',
count_factor=1):
    """Plot the mean, std and count of an xarray DataArray time-series."""
    import xarray as xr
    import seaborn as sns
cmap = sns.color_palette("colorblind", 2)
time_dim = list(set(da_ts.dims))[0]
grp = '{}.{}'.format(time_dim, time_reduce)
if reduce == 'mean':
mean = da_ts.groupby(grp).mean()
elif reduce == 'median':
mean = da_ts.groupby(grp).median()
std = da_ts.groupby(grp).std()
mean_plus_std = mean + std
mean_minus_std = mean - std
count = da_ts.groupby(grp).count()
if isinstance(da_ts, xr.Dataset):
dvars = [x for x in da_ts.data_vars.keys()]
assert len(dvars) == 2
secondary_y = dvars[1]
else:
secondary_y = None
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 15))
mean_df = mean.to_dataframe()
if secondary_y is not None:
axes[0] = mean_df[dvars[0]].plot(
ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
ax2mean = mean_df[secondary_y].plot(
ax=axes[0],
linewidth=2.0,
marker='s',
color=cmap[1],
secondary_y=True)
h1, l1 = axes[0].get_legend_handles_labels()
h2, l2 = axes[0].right_ax.get_legend_handles_labels()
handles = h1 + h2
labels = l1 + l2
axes[0].legend(handles, labels)
axes[0].fill_between(mean_df.index.values,
mean_minus_std[dvars[0]].values,
mean_plus_std[dvars[0]].values,
color=cmap[0],
alpha=0.5)
ax2mean.fill_between(
mean_df.index.values,
mean_minus_std[secondary_y].values,
mean_plus_std[secondary_y].values,
color=cmap[1],
alpha=0.5)
ax2mean.tick_params(axis='y', colors=cmap[1])
else:
mean_df.plot(ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
axes[0].fill_between(
mean_df.index.values,
mean_minus_std.values,
mean_plus_std.values,
color=cmap[0],
alpha=0.5)
axes[0].grid()
count_df = count.to_dataframe() / count_factor
count_df.plot.bar(ax=axes[1], rot=0)
axes[0].xaxis.set_tick_params(labelbottom=True)
axes[0].tick_params(axis='y', colors=cmap[0])
fig.tight_layout()
if secondary_y is not None:
return axes, ax2mean
else:
return axes
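# Illustrative usage sketch (not executed here; pw_tela is a hypothetical PWV
# DataArray with a 'time' dimension, e.g. one station from the GNSS_PW files):
#   axes = plot_mean_std_count(pw_tela, time_reduce='hour', reduce='mean')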
def plot_seasonal_histogram(da, dim='sound_time', xlim=None, xlabel=None,
suptitle=''):
fig_hist, axs = plt.subplots(2, 2, sharex=False, sharey=True,
figsize=(10, 8))
seasons = ['DJF', 'MAM', 'JJA', 'SON']
cmap = sns.color_palette("colorblind", 4)
for i, ax in enumerate(axs.flatten()):
da_season = da.sel(
{dim: da['{}.season'.format(dim)] == seasons[i]}).dropna(dim)
ax = sns.distplot(da_season, ax=ax, norm_hist=False,
color=cmap[i], hist_kws={'edgecolor': 'k'},
axlabel=xlabel,
label=seasons[i])
ax.set_xlim(xlim)
ax.legend()
# axes.set_xlabel('MLH [m]')
ax.set_ylabel('Frequency')
fig_hist.suptitle(suptitle)
fig_hist.tight_layout()
return axs
def plot_two_histograms_comparison(x, y, bins=None, labels=['x', 'y'],
ax=None, colors=['b', 'r']):
import numpy as np
import matplotlib.pyplot as plt
x_w = np.empty(x.shape)
x_w.fill(1/x.shape[0])
y_w = np.empty(y.shape)
y_w.fill(1/y.shape[0])
if ax is None:
fig, ax = plt.subplots()
ax.hist([x, y], bins=bins, weights=[x_w, y_w], color=colors,
label=labels)
ax.legend()
return ax
def plot_diurnal_wind_hodograph(path=ims_path, station='TEL-AVIV-COAST',
season=None, cmax=None, ax=None):
import xarray as xr
from metpy.plots import Hodograph
# import matplotlib
import numpy as np
colorbar = False
# from_list = matplotlib.colors.LinearSegmentedColormap.from_list
cmap = plt.cm.get_cmap('hsv', 24)
# cmap = from_list(None, plt.cm.jet(range(0,24)), 24)
U = xr.open_dataset(path / 'IMS_U_israeli_10mins.nc')
V = xr.open_dataset(path / 'IMS_V_israeli_10mins.nc')
u_sta = U[station]
v_sta = V[station]
u_sta.load()
v_sta.load()
if season is not None:
print('{} season selected'.format(season))
u_sta = u_sta.sel(time=u_sta['time.season'] == season)
v_sta = v_sta.sel(time=v_sta['time.season'] == season)
u = u_sta.groupby('time.hour').mean()
v = v_sta.groupby('time.hour').mean()
if ax is None:
colorbar = True
fig, ax = plt.subplots()
    if cmax is None:
        max_uv = max(max(u.values), max(v.values)) + 1
    else:
        max_uv = cmax
h = Hodograph(component_range=max_uv, ax=ax)
h.add_grid(increment=0.5)
# hours = np.arange(0, 25)
lc = h.plot_colormapped(u, v, u.hour, cmap=cmap,
linestyle='-', linewidth=2)
#ticks = np.arange(np.min(hours), np.max(hours))
# cb = fig.colorbar(lc, ticks=range(0,24), label='Time of Day [UTC]')
if colorbar:
cb = ax.figure.colorbar(lc, ticks=range(
0, 24), label='Time of Day [UTC]')
# cb.ax.tick_params(length=0)
if season is None:
ax.figure.suptitle('{} diurnal wind Hodograph'.format(station))
else:
ax.figure.suptitle(
'{} diurnal wind Hodograph {}'.format(station, season))
ax.set_xlabel('North')
ax.set_ylabel('East')
ax.set_title('South')
ax2 = ax.twinx()
ax2.tick_params(axis='y', right=False, labelright=False)
ax2.set_ylabel('West')
# axcb = fig.colorbar(lc)
return ax
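# Illustrative usage sketch (not executed here; the station name follows the
# IMS metadata used elsewhere in this module):
#   ax = plot_diurnal_wind_hodograph(station='TEL-AVIV-COAST', season='JJA')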
def plot_MLR_GNSS_PW_harmonics_facetgrid(path=work_yuval, season='JJA',
n_max=2, ylim=None, scope='diurnal',
save=True, era5=False, leg_size=15):
"""
Parameters
----------
path : TYPE, optional
DESCRIPTION. The default is work_yuval.
season : TYPE, optional
DESCRIPTION. The default is 'JJA'.
n_max : TYPE, optional
DESCRIPTION. The default is 2.
ylim : TYPE, optional
        the y-limits of each panel; use [-6, 8] for the annual scope. The default is None.
scope : TYPE, optional
DESCRIPTION. The default is 'diurnal'.
save : TYPE, optional
DESCRIPTION. The default is True.
era5 : TYPE, optional
DESCRIPTION. The default is False.
leg_size : TYPE, optional
DESCRIPTION. The default is 15.
Returns
-------
None.
"""
import xarray as xr
from aux_gps import run_MLR_harmonics
from matplotlib.ticker import AutoMinorLocator
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
sns.set_style('whitegrid')
sns.set_style('ticks')
geo = produce_geo_gnss_solved_stations(add_distance_to_coast=True, plot=False)
if scope == 'diurnal':
cunits = 'cpd'
ticks = np.arange(0, 23, 3)
xlabel = 'Hour of day [UTC]'
elif scope == 'annual':
cunits = 'cpy'
ticks = np.arange(1, 13, 1)
xlabel = 'month'
print('producing {} harmonics plot.'.format(scope))
if era5:
harmonics = xr.load_dataset(path / 'GNSS_PW_era5_harmonics_{}.nc'.format(scope))
else:
harmonics = xr.load_dataset(path / 'GNSS_PW_harmonics_{}.nc'.format(scope))
# sites = sorted(list(set([x.split('_')[0] for x in harmonics])))
# da = xr.DataArray([x for x in range(len(sites))], dims='GNSS')
# da['GNSS'] = sites
sites = group_sites_to_xarray(upper=False, scope=scope)
sites_flat = [x for x in sites.values.flatten()]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
try:
harm_site = harmonics[[x for x in harmonics if site in x]]
if site in ['nrif']:
leg_loc = 'upper center'
elif site in ['yrcm', 'ramo']:
leg_loc = 'lower center'
# elif site in ['katz']:
# leg_loc = 'upper right'
else:
leg_loc = None
if scope == 'annual':
leg_loc = 'upper left'
ax, handles, labels = run_MLR_harmonics(harm_site, season=season,
cunits=cunits,
n_max=n_max, plot=True, ax=ax,
legend_loc=leg_loc, ncol=1,
legsize=leg_size, lw=2.5,
legend_S_only=True)
ax.set_xlabel(xlabel, fontsize=16)
if ylim is not None:
ax.set_ylim(*ylim)
ax.tick_params(axis='x', which='major', labelsize=18)
# if scope == 'diurnal':
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=18)
ax.yaxis.tick_left()
ax.xaxis.set_ticks(ticks)
ax.grid()
ax.set_title('')
ax.set_ylabel('')
ax.grid(axis='y', which='minor', linestyle='--')
# get this for upper legend:
# handles, labels = ax.get_legend_handles_labels()
if scope == 'annual':
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
label_coord = [0.52, 0.87]
fs = 18
elif scope == 'diurnal':
site_label = site.upper()
label_coord = [0.1, 0.85]
fs = 20
ax.text(*label_coord, site_label,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes, fontsize=fs)
if j == 0:
ax.set_ylabel('PWV anomalies [mm]', fontsize=16)
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
except TypeError:
print('{}, {} axis off'.format(i, j))
ax.set_axis_off()
# for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
# harm_site = harmonics[[x for x in harmonics if sites[i] in x]]
# if site in ['elat', 'nrif']:
# loc = 'upper center'
# text = 0.1
# elif site in ['elro', 'yrcm', 'ramo', 'slom', 'jslm']:
# loc = 'upper right'
# text = 0.1
# else:
# loc = None
# text = 0.1
# ax = run_MLR_diurnal_harmonics(harm_site, season=season, n_max=n_max, plot=True, ax=ax, legend_loc=loc)
# ax.set_title('')
# ax.set_ylabel('PW anomalies [mm]')
# if ylim is not None:
# ax.set_ylim(ylim[0], ylim[1])
# ax.text(text, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# for i, ax in enumerate(fg.axes.flatten()):
    # if i > (da.GNSS.size-1):
# ax.set_axis_off()
# pass
# add upper legend for all factes:
S_labels = labels[:-2]
S_labels = [x.split(' ')[0] for x in S_labels]
last_label = 'Mean PWV anomalies'
sum_label = labels[-2].split("'")[1]
S_labels.append(sum_label)
S_labels.append(last_label)
fg.fig.legend(handles=handles, labels=S_labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.032,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
if era5:
filename = 'pw_era5_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
else:
filename = 'pw_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_gustiness(path=work_yuval, ims_path=ims_path, site='tela',
ims_site='HAIFA-TECHNION', season='JJA', month=None, pts=7,
ax=None):
import xarray as xr
import numpy as np
g = xr.open_dataset(
ims_path / 'IMS_G{}_israeli_10mins_daily_anoms.nc'.format(pts))[ims_site]
g.load()
if season is not None:
g = g.sel(time=g['time.season'] == season)
label = 'Gustiness {} IMS station in {} season'.format(
site, season)
elif month is not None:
g = g.sel(time=g['time.month'] == month)
label = 'Gustiness {} IMS station in {} month'.format(
site, month)
elif season is not None and month is not None:
        raise ValueError('please pick either season or month, not both')
# date = groupby_date_xr(g)
# # g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
# g_anoms = g.groupby(date) - g.groupby(date).mean('time')
# g_anoms = g_anoms.reset_coords(drop=True)
G = g.groupby('time.hour').mean('time') * 100.0
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
Gline = G.plot(ax=ax, color='b', marker='o', label='Gustiness')
ax.set_title(label)
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
# ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_hourly_anoms_thresh_50_homogenized.nc')[site]
    pw = pw.load().dropna('time')
if season is not None:
pw = pw.sel(time=pw['time.season'] == season)
elif month is not None:
pw = pw.sel(time=pw['time.month'] == month)
# date = groupby_date_xr(pw)
# pw = pw.groupby(date) - pw.groupby(date).mean('time')
# pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
PWline = pw.plot.line(ax=axpw, color='tab:green',
marker='s', label='PW ({})'.format(season))
axpw.axhline(0, color='k', linestyle='--')
lns = Gline + PWline
axpw.set_ylabel('PW anomalies [mm]')
align_yaxis(ax, 0, axpw, 0)
return lns
def plot_gustiness_facetgrid(path=work_yuval, ims_path=ims_path,
season='JJA', month=None, save=True):
import xarray as xr
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124'}
da = xr.DataArray([x for x in gnss_ims_dict.values()], dims=['GNSS'])
da['GNSS'] = [x for x in gnss_ims_dict.keys()]
to_remove = ['kabr', 'nzrt', 'katz', 'elro', 'klhv', 'yrcm', 'slom']
sites = [x for x in da['GNSS'].values if x not in to_remove]
da = da.sel(GNSS=sites)
gnss_order = ['bshm', 'mrav', 'drag', 'csar', 'yosh', 'dsea', 'tela', 'jslm',
'nrif', 'alon', 'ramo', 'elat']
df = da.to_dataframe('gnss')
da = df.reindex(gnss_order).to_xarray()['gnss']
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
lns = plot_gustiness(path=path, ims_path=ims_path,
ims_site=gnss_ims_dict[site],
site=site, season=season, month=month, ax=ax)
labs = [l.get_label() for l in lns]
if site in ['tela', 'alon', 'dsea', 'csar', 'elat', 'nrif']:
ax.legend(lns, labs, loc='upper center', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
elif site in ['drag']:
ax.legend(lns, labs, loc='upper right', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
else:
ax.legend(lns, labs, loc='best', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
ax.set_title('')
ax.set_ylabel(r'G anomalies $\times$$10^{2}$')
# ax.text(.8, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
for i, ax in enumerate(fg.axes.flatten()):
if i > (da.GNSS.size-1):
ax.set_axis_off()
pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.974,
bottom=0.053,
left=0.041,
right=0.955,
hspace=0.15,
wspace=0.3)
filename = 'gustiness_israeli_gnss_pw_diurnal_{}.png'.format(season)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_fft_diurnal(path=work_yuval, save=True):
import xarray as xr
import numpy as np
import matplotlib.ticker as tck
sns.set_style("whitegrid",
{'axes.grid': True,
'xtick.bottom': True,
'font.family': 'serif',
'ytick.left': True})
sns.set_context('paper')
power = xr.load_dataset(path / 'GNSS_PW_power_spectrum_diurnal.nc')
power = power.to_array('site')
sites = [x for x in power.site.values]
fg = power.plot.line(col='site', col_wrap=4,
sharex=False, figsize=(20, 18))
fg.set_xlabels('Frequency [cpd]')
fg.set_ylabels('PW PSD [dB]')
ticklabels = np.arange(0, 7)
for ax, site in zip(fg.axes.flatten(), sites):
sns.despine()
ax.set_title('')
ax.set_xticklabels(ticklabels)
# ax.tick_params(axis='y', which='minor')
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.set_xlim(0, 6.5)
ax.set_ylim(70, 125)
ax.grid(True)
ax.grid(which='minor', axis='y')
ax.text(.8, .85, site.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
fg.fig.tight_layout()
filename = 'power_pw_diurnal.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_rinex_availability_with_map(path=work_yuval, gis_path=gis_path,
scope='diurnal', ims=True,
dem_path=dem_path, fontsize=18, save=True):
# TODO: add box around merged stations and removed stations
# TODO: add color map labels to stations removed and merged
from aux_gps import gantt_chart
import xarray as xr
import pandas as pd
import geopandas as gpd
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
from matplotlib.colors import ListedColormap
from aux_gps import path_glob
sns.set_style('whitegrid')
sns.set_style('ticks')
print('{} scope selected.'.format(scope))
fig = plt.figure(figsize=(20, 15))
# grid = plt.GridSpec(1, 2, width_ratios=[
# 5, 2], wspace=0.1)
grid = plt.GridSpec(1, 2, width_ratios=[
5, 3], wspace=0.05)
ax_gantt = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_map = fig.add_subplot(grid[0, 1]) # plt.subplot(122)
# fig, ax = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(20, 6))
# RINEX gantt chart:
if scope == 'diurnal':
file = path_glob(path, 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')[-1]
elif scope == 'annual':
file = path / 'GNSS_PW_monthly_thresh_50.nc'
ds = xr.open_dataset(file)
just_pw = [x for x in ds if 'error' not in x]
ds = ds[just_pw]
da = ds.to_array('station').sel(time=slice(None,'2019'))
da['station'] = [x.upper() for x in da.station.values]
ds = da.to_dataset('station')
# reorder for annual, coastal, highland and eastern:
stns = group_sites_to_xarray(scope='annual', upper=True).T.values.ravel()
stns = stns[~pd.isnull(stns)]
ds = ds[stns]
# colors:
colors = produce_colors_for_pwv_station(scope=scope, zebra=False)
title = 'Daily RINEX files availability for the Israeli GNSS stations'
ax_gantt = gantt_chart(
ds,
ax=ax_gantt,
fw='bold', grid=True,
title='', colors=colors,
pe_dict=None, fontsize=fontsize, linewidth=24, antialiased=False)
years_fmt = mdates.DateFormatter('%Y')
# ax_gantt.xaxis.set_major_locator(mdates.YearLocator())
ax_gantt.xaxis.set_major_locator(mdates.YearLocator(4))
ax_gantt.xaxis.set_minor_locator(mdates.YearLocator(1))
ax_gantt.xaxis.set_major_formatter(years_fmt)
# ax_gantt.xaxis.set_minor_formatter(years_fmt)
ax_gantt.tick_params(axis='x', labelrotation=0)
# Israel gps ims map:
ax_map = plot_israel_map(
gis_path=gis_path, ax=ax_map, ticklabelsize=fontsize)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
# scale_bar(ax_map, 50)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level',
size=fontsize, weight='normal')
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
# removed = ['hrmn', 'nizn', 'spir']
# removed = ['hrmn']
if scope == 'diurnal':
removed = ['hrmn', 'gilb', 'lhav']
elif scope == 'annual':
removed = ['hrmn', 'gilb', 'lhav']
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
# gps.loc[removed, :].plot(ax=ax_map, color='black', edgecolor='black', marker='s',
# alpha=1.0, markersize=25, facecolor='white')
# gps.loc[merged, :].plot(ax=ax_map, color='black', edgecolor='r', marker='s',
# alpha=0.7, markersize=25)
gps_stations = gps_list # [x for x in gps.index]
# to_plot_offset = ['mrav', 'klhv', 'nzrt', 'katz', 'elro']
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
# geo_annotate(ax_map, gps_normal_anno.lon, gps_normal_anno.lat,
# gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# geo_annotate(ax_map, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax_map, color='black', edgecolor='black',
marker='x', linewidth=2, zorder=2)
geo_annotate(ax_map, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=fontsize - 2, colorupdown=False)
# plt.legend(['GNSS \nreceiver sites',
# 'removed \nGNSS sites',
# 'merged \nGNSS sites',
# 'radiosonde\nstation'],
# loc='upper left', framealpha=0.7, fancybox=True,
# handletextpad=0.2, handlelength=1.5)
if ims:
print('getting IMS temperature stations metadata...')
ims = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims.plot(ax=ax_map, marker='o', edgecolor='tab:orange', alpha=1.0,
markersize=35, facecolor="tab:orange", zorder=1)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
print('getting solved GNSS israeli stations metadata...')
plt.legend(['GNSS \nstations',
'radiosonde\nstation', 'IMS stations'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
else:
plt.legend(['GNSS \nstations',
'radiosonde\nstation'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
fig.subplots_adjust(top=0.95,
bottom=0.11,
left=0.05,
right=0.95,
hspace=0.2,
wspace=0.2)
# plt.legend(['IMS stations', 'GNSS stations'], loc='upper left')
filename = 'rinex_israeli_gnss_map_{}.png'.format(scope)
# caption('Daily RINEX files availability for the Israeli GNSS station network at the SOPAC/GARNER website')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_means_box_plots(path=work_yuval, thresh=50, kind='box',
x='month', col_wrap=5, ylimits=None, twin=None,
twin_attrs=None,
xlimits=None, anoms=True, bins=None,
season=None, attrs_plot=True, save=True, ds_input=None):
import xarray as xr
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
attrs = [x.attrs for x in pw.data_vars.values()]
if x == 'month':
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# pw = pw.resample(time='MS').mean('time')
elif x == 'hour':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
if twin is not None:
twin = twin.groupby('time.month') - \
twin.groupby('time.month').mean('time')
twin = twin.reset_coords(drop=True)
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
elif x == 'day':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_daily_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
pw = pw.groupby('time.dayofyear') - \
                pw.groupby('time.dayofyear').mean('time')
if season is not None:
if season != 'all':
print('{} season is selected'.format(season))
pw = pw.sel(time=pw['time.season'] == season)
all_seas = False
if twin is not None:
twin = twin.sel(time=twin['time.season'] == season)
else:
print('all seasons selected')
all_seas = True
else:
all_seas = False
for i, da in enumerate(pw.data_vars):
pw[da].attrs = attrs[i]
if not attrs_plot:
attrs = None
if ds_input is not None:
        # be careful!:
pw = ds_input
fg = plot_multi_box_xr(pw, kind=kind, x=x, col_wrap=col_wrap,
ylimits=ylimits, xlimits=xlimits, attrs=attrs,
bins=bins, all_seasons=all_seas, twin=twin,
twin_attrs=twin_attrs)
attrs = [x.attrs for x in pw.data_vars.values()]
for i, ax in enumerate(fg.axes.flatten()):
try:
mean_years = float(attrs[i]['mean_years'])
# print(i)
# print(mean_years)
except IndexError:
ax.set_axis_off()
pass
if kind != 'hist':
[fg.axes[x, 0].set_ylabel('PW [mm]')
for x in range(len(fg.axes[:, 0]))]
# [fg.axes[-1, x].set_xlabel('month') for x in range(len(fg.axes[-1, :]))]
fg.fig.subplots_adjust(top=0.98,
bottom=0.05,
left=0.025,
right=0.985,
hspace=0.27,
wspace=0.215)
if season is not None:
filename = 'pw_{}ly_means_{}_seas_{}.png'.format(x, kind, season)
else:
filename = 'pw_{}ly_means_{}.png'.format(x, kind)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_interannual_MLR_results(path=climate_path, fontsize=16, save=True):
import matplotlib.pyplot as plt
from climate_works import run_best_MLR
# rds = xr.load_dataset(path / 'best_MLR_interannual_gnss_pwv.nc')
model_lci, rdf_lci = run_best_MLR(plot=False, heatmap=False, keep='lci',
add_trend=True)
rds_lci = model_lci.results_
model_eofi, rdf_eofi = run_best_MLR(plot=False, heatmap=False, keep='eofi',
add_trend=False)
rds_eofi = model_eofi.results_
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 7))
origln = rds_lci['original'].plot.line('k-.', ax=axes[0], linewidth=1.5)
predln_lci = rds_lci['predict'].plot.line('b-', ax=axes[0], linewidth=1.5)
predln_eofi = rds_eofi['predict'].plot.line(
'g-', ax=axes[0], linewidth=1.5)
r2_lci = rds_lci['r2_adj'].item()
r2_eofi = rds_eofi['r2_adj'].item()
axes[0].legend(origln+predln_lci+predln_eofi, ['mean PWV (12m-mean)', 'MLR with LCI (Adj R$^2$:{:.2f})'.format(
r2_lci), 'MLR with EOFs (Adj R$^2$:{:.2f})'.format(r2_eofi)], fontsize=fontsize-2)
axes[0].grid()
axes[0].set_xlabel('')
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].grid(which='minor', color='k', linestyle='--')
residln_lci = rds_lci['resid'].plot.line('b-', ax=axes[1])
residln_eofi = rds_eofi['resid'].plot.line('g-', ax=axes[1])
axes[1].legend(residln_lci+residln_eofi, ['MLR with LCI',
'MLR with EOFs'], fontsize=fontsize-2)
axes[1].grid()
axes[1].set_ylabel('Residuals [mm]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
axes[1].xaxis.set_major_locator(mdates.YearLocator(2))
axes[1].xaxis.set_minor_locator(mdates.YearLocator(1))
axes[1].xaxis.set_major_formatter(years_fmt)
axes[1].grid(which='minor', color='k', linestyle='--')
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
axes[1].figure.autofmt_xdate()
fig.tight_layout()
fig.subplots_adjust()
if save:
filename = 'pw_interannual_MLR_comparison.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_annual_pw(path=work_yuval, fontsize=20, labelsize=18, compare='uerra',
ylim=[7.5, 40], save=True, kind='violin', bins=None, ds=None,
add_temperature=False):
"""kind can be violin or hist, for violin choose ylim=7.5,40 and for hist
choose ylim=0,0.3"""
import xarray as xr
import pandas as pd
import numpy as np
from synoptic_procedures import slice_xr_with_synoptic_class
gnss_filename = 'GNSS_PW_monthly_thresh_50.nc'
# gnss_filename = 'first_climatol_try.nc'
pw = xr.load_dataset(path / gnss_filename)
df_annual = pw.to_dataframe()
hue = None
if compare is not None:
df_annual = prepare_reanalysis_monthly_pwv_to_dataframe(
path, re=compare, ds=ds)
hue = 'source'
if not add_temperature:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind=kind,
fg=None,
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, hue=hue,
save=False, bins=bins)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
filename = 'pw_annual_means_{}.png'.format(kind)
else:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind='mean_month',
fg=None, ticklabelcolor='tab:blue',
ylim=[10, 31], color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, hue=None,
save=False, bins=None)
# tmm = xr.load_dataset(path / 'GNSS_TD_monthly_1996_2020.nc')
tmm = xr.load_dataset(path / 'IMS_T/GNSS_TD_daily.nc')
tmm = tmm.groupby('time.month').mean()
dftm = tmm.to_dataframe()
# dftm.columns = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
sites = group_sites_to_xarray(scope='annual')
sites_flat = sites.values.ravel()
# sites = sites[~pd.isnull(sites)]
for i, ax in enumerate(fg.axes.flat):
if pd.isnull(sites_flat[i]):
continue
twinax = ax.twinx()
twinax.plot(dftm.index.values, dftm[sites_flat[i]].values, color='tab:red',
markersize=10, marker='s', lw=1, markerfacecolor="None",
label='Temperature')
# dftm[sites[i]].plot(ax=twinax, color='r', markersize=10,
# marker='s', lw=1, markerfacecolor="None")
twinax.set_ylim(5, 37)
twinax.set_yticks(np.arange(5, 40, 10))
twinax.tick_params(axis='y', which='major', labelcolor='tab:red',
labelsize=labelsize)
if sites_flat[i] in sites.sel(group='eastern'):
twinax.set_ylabel(r'Temperature [$\degree$ C]',
fontsize=labelsize)
# fg.fig.canvas.draw()
# twinax.xaxis.set_ticks(np.arange(1, 13))
# twinax.tick_params(axis='x', which='major', labelsize=labelsize-2)
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = twinax.get_legend_handles_labels()
labels = ['PWV', 'Surface Temperature']
fg.fig.legend(handles=lines+lines2, labels=labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.97,
bottom=0.029,
left=0.049,
right=0.96,
hspace=0.15,
wspace=0.17)
filename = 'pw_annual_means_temperature.png'
if save:
if compare is not None:
filename = 'pw_annual_means_{}_with_{}.png'.format(kind, compare)
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_multi_box_xr(pw, kind='violin', x='month', sharex=False, sharey=False,
col_wrap=5, ylimits=None, xlimits=None, attrs=None,
bins=None, all_seasons=False, twin=None, twin_attrs=None):
import xarray as xr
pw = pw.to_array('station')
if twin is not None:
twin = twin.to_array('station')
fg = xr.plot.FacetGrid(pw, col='station', col_wrap=col_wrap, sharex=sharex,
sharey=sharey)
for i, (sta, ax) in enumerate(zip(pw['station'].values, fg.axes.flatten())):
pw_sta = pw.sel(station=sta).reset_coords(drop=True)
if all_seasons:
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'DJF')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='o')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'MAM')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='^')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'JJA')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='s')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'SON')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='x')
df = pw_sta.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='d')
if sta == 'nrif' or sta == 'elat':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper center', framealpha=0.5, fancybox=True)
elif sta == 'yrcm' or sta == 'ramo':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper right', framealpha=0.5, fancybox=True)
else:
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='best', framealpha=0.5, fancybox=True)
else:
# if x == 'hour':
# # remove seasonal signal:
# pw_sta = pw_sta.groupby('time.dayofyear') - pw_sta.groupby('time.dayofyear').mean('time')
# elif x == 'month':
# # remove daily signal:
# pw_sta = pw_sta.groupby('time.hour') - pw_sta.groupby('time.hour').mean('time')
df = pw_sta.to_dataframe(sta)
if twin is not None:
twin_sta = twin.sel(station=sta).reset_coords(drop=True)
twin_df = twin_sta.to_dataframe(sta)
else:
twin_df = None
if attrs is not None:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i],
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
else:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None,
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
return fg
def plot_box_df(df, x='month', title='TELA', marker='o',
ylabel=r'IWV [kg$\cdot$m$^{-2}$]', ax=None, kind='violin',
ylimits=(5, 40), xlimits=None, attrs=None, bins=None, twin_df=None,
twin_attrs=None):
# x=hour is experimental
import seaborn as sns
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
# df = da_ts.to_dataframe()
if x == 'month':
df[x] = df.index.month
pal = sns.color_palette("Paired", 12)
elif x == 'hour':
df[x] = df.index.hour
if twin_df is not None:
twin_df[x] = twin_df.index.hour
# df[x] = df.index
pal = sns.color_palette("Paired", 12)
y = df.columns[0]
if ax is None:
fig, ax = plt.subplots()
if kind is None:
df = df.groupby(x).mean()
df.plot(ax=ax, legend=False, marker=marker)
if twin_df is not None:
twin_df = twin_df.groupby(x).mean()
twinx = ax.twinx()
twin_df.plot.line(ax=twinx, color='r', marker='s')
ax.axhline(0, color='k', linestyle='--')
if twin_attrs is not None:
twinx.set_ylabel(twin_attrs['ylabel'])
align_yaxis(ax, 0, twinx, 0)
ax.set_xlabel('Time of day [UTC]')
elif kind == 'violin':
sns.violinplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
gridsize=250, inner='quartile', scale='area')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'box':
kwargs = dict(markerfacecolor='r', marker='o')
sns.boxplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
whis=1.0, flierprops=kwargs, showfliers=False)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'hist':
if bins is None:
bins = 15
a = df[y].dropna()
sns.distplot(ax=ax, a=a, norm_hist=True, bins=bins, axlabel='PW [mm]')
xmean = df[y].mean()
xmedian = df[y].median()
std = df[y].std()
sk = skew(df[y].dropna().values)
kurt = kurtosis(df[y].dropna().values)
# xmode = df[y].mode().median()
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean, color='r', linestyle='--')
ax.vlines(x=xmedian, ymin=0, ymax=ymed, color='g', linestyle='-')
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
# ax.legend(['Mean:{:.1f}'.format(xmean),'Median:{:.1f}'.format(xmedian),'Mode:{:.1f}'.format(xmode)])
ax.legend(['Mean: {:.1f}'.format(xmean),
'Median: {:.1f}'.format(xmedian)])
ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(
std, sk, kurt), transform=ax.transAxes)
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.yaxis.grid(True, which='minor', linestyle='--', linewidth=1, alpha=0.7)
ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
title = ax.get_title().split('=')[-1].strip(' ')
if attrs is not None:
mean_years = float(attrs['mean_years'])
ax.set_title('')
ax.text(.2, .85, y.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if kind is not None:
if kind != 'hist':
ax.text(.22, .72, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
ax.yaxis.tick_left()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
if ylimits is not None:
ax.set_ylim(*ylimits)
if twin_attrs is not None:
twinx.set_ylim(*twin_attrs['ylimits'])
align_yaxis(ax, 0, twinx, 0)
if xlimits is not None:
ax.set_xlim(*xlimits)
return ax
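# A minimal usage sketch for plot_box_df (not part of the original analysis):
# it builds a synthetic hourly PWV series with a DatetimeIndex, since plot_box_df
# derives its 'month'/'hour' grouping column from the dataframe index. The station
# name and the values here are purely illustrative.
def _example_plot_box_df():
    import numpy as np
    import pandas as pd
    time = pd.date_range('2018-01-01', '2018-12-31 23:00', freq='1H')
    # synthetic PWV-like series around 25 mm:
    pwv = pd.DataFrame({'tela': 25 + 5 * np.random.randn(len(time))}, index=time)
    # group the synthetic series by hour of day and draw boxplots:
    return plot_box_df(pwv, x='hour', title='TELA (synthetic)', kind='box',
                       ylimits=(5, 40))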
def plot_means_pw(load_path=work_yuval, ims_path=ims_path, thresh=50,
col_wrap=5, means='hour', save=True):
import xarray as xr
import numpy as np
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
if means == 'hour':
# remove long term monthly means:
pw_clim = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw_clim = pw_clim.groupby('time.{}'.format(means)).mean('time')
else:
pw_clim = pw.groupby('time.{}'.format(means)).mean('time')
# T = xr.load_dataset(
# ims_path /
# 'GNSS_5mins_TD_ALL_1996_2020.nc')
# T_clim = T.groupby('time.month').mean('time')
attrs = [x.attrs for x in pw.data_vars.values()]
fg = pw_clim.to_array('station').plot(col='station', col_wrap=col_wrap,
color='b', marker='o', alpha=0.7,
sharex=False, sharey=True)
col_arr = np.arange(0, len(pw_clim))
right_side = col_arr[col_wrap-1::col_wrap]
for i, ax in enumerate(fg.axes.flatten()):
title = ax.get_title().split('=')[-1].strip(' ')
try:
mean_years = float(attrs[i]['mean_years'])
ax.set_title('')
ax.text(.2, .85, title.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
ax.text(.2, .73, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
# ax_t = ax.twinx()
# T_clim['{}'.format(title)].plot(
# color='r', linestyle='dashed', marker='s', alpha=0.7,
# ax=ax_t)
# ax_t.set_ylim(0, 30)
fg.fig.canvas.draw()
# labels = [item.get_text() for item in ax_t.get_yticklabels()]
# ax_t.yaxis.set_ticklabels([])
# ax_t.tick_params(axis='y', color='r')
# ax_t.set_ylabel('')
# if i in right_side:
# ax_t.set_ylabel(r'Surface temperature [$\degree$C]', fontsize=10)
# ax_t.yaxis.set_ticklabels(labels)
# ax_t.tick_params(axis='y', labelcolor='r', color='r')
# show months ticks and grid lines for pw:
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.yaxis.grid()
# ax.legend([ax.lines[0], ax_t.lines[0]], ['PW', 'T'],
# loc='upper right', fontsize=10, prop={'size': 8})
# ax.legend([ax.lines[0]], ['PW'],
# loc='upper right', fontsize=10, prop={'size': 8})
except IndexError:
pass
# change bottom xticks to 1-12 and show them:
# fg.axes[-1, 0].xaxis.set_ticks(np.arange(1, 13))
[fg.axes[x, 0].set_ylabel('PW [mm]') for x in range(len(fg.axes[:, 0]))]
# adjust subplots:
fg.fig.subplots_adjust(top=0.977,
bottom=0.039,
left=0.036,
right=0.959,
hspace=0.185,
wspace=0.125)
filename = 'PW_{}_climatology.png'.format(means)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_gnss_radiosonde_monthly_means(sound_path=sound_path, path=work_yuval,
times=['2014', '2019'], sample='MS',
gps_station='tela', east_height=5000):
import xarray as xr
from aux_gps import path_glob
    import pandas as pd
    import matplotlib.pyplot as plt
    import seaborn as sns
file = path_glob(sound_path, 'bet_dagan_phys_PW_Tm_Ts_*.nc')
phys = xr.load_dataset(file[0])['PW']
if east_height is not None:
file = path_glob(sound_path, 'bet_dagan_edt_sounding*.nc')
east = xr.load_dataset(file[0])['east_distance']
east = east.resample(sound_time=sample).mean().sel(
Height=east_height, method='nearest')
east_df = east.reset_coords(drop=True).to_dataframe()
if times is not None:
phys = phys.sel(sound_time=slice(*times))
ds = phys.resample(sound_time=sample).mean(
).to_dataset(name='Bet-dagan-radiosonde')
ds = ds.rename({'sound_time': 'time'})
gps = xr.load_dataset(
path / 'GNSS_PW_thresh_50_homogenized.nc')[gps_station]
if times is not None:
gps = gps.sel(time=slice(*times))
ds[gps_station] = gps.resample(time=sample).mean()
df = ds.to_dataframe()
# now plot:
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
# [x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])])
# for x in axes]
df.columns = ['Bet dagan soundings', '{} GNSS station'.format(gps_station)]
sns.lineplot(data=df, markers=['o', 's'], linewidth=2.0, ax=axes[0])
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
df_r = df.iloc[:, 1] - df.iloc[:, 0]
    df_r.name = 'Residual distribution'
sns.lineplot(data=df_r, color='k', marker='o', linewidth=1.5, ax=axes[1])
if east_height is not None:
ax_east = axes[1].twinx()
sns.lineplot(data=east_df, color='red',
marker='x', linewidth=1.5, ax=ax_east)
ax_east.set_ylabel(
'East drift at {} km altitude [km]'.format(east_height / 1000.0))
axes[1].axhline(y=0, color='r')
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
axes[0].set_ylabel('Precipitable Water [mm]')
axes[1].set_ylabel('Residuals [mm]')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
return ds
def plot_wetz_example(path=tela_results_path, plot='WetZ', fontsize=16,
save=True):
from aux_gps import path_glob
import matplotlib.pyplot as plt
from gipsyx_post_proc import process_one_day_gipsyx_output
filepath = path_glob(path, 'tela*_smoothFinal.tdp')[3]
if plot is None:
df, meta = process_one_day_gipsyx_output(filepath, True)
return df, meta
else:
df, meta = process_one_day_gipsyx_output(filepath, False)
if not isinstance(plot, str):
            raise ValueError('please pick only one field to plot, e.g., WetZ')
error_plot = '{}_error'.format(plot)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
desc = meta['desc'][plot]
unit = meta['units'][plot]
df[plot].plot(ax=ax, legend=False, color='k')
ax.fill_between(df.index, df[plot] - df[error_plot],
df[plot] + df[error_plot], alpha=0.5)
ax.grid()
# ax.set_title('{} from station TELA in {}'.format(
# desc, df.index[100].strftime('%Y-%m-%d')))
ax.set_ylabel('WetZ [{}]'.format(unit), fontsize=fontsize)
ax.set_xlabel('Time [UTC]', fontsize=fontsize)
ax.tick_params(which='both', labelsize=fontsize)
ax.grid('on')
fig.tight_layout()
filename = 'wetz_tela_daily.png'
caption('{} from station TELA in {}. Note the error estimation from the GipsyX software(filled)'.format(
desc, df.index[100].strftime('%Y-%m-%d')))
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_figure_3(path=tela_solutions, year=2004, field='WetZ',
middle_date='11-25', zooms=[10, 3, 0.5], save=True):
from gipsyx_post_proc import analyse_results_ds_one_station
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
dss = xr.open_dataset(path / 'TELA_ppp_raw_{}.nc'.format(year))
nums = sorted(list(set([int(x.split('-')[1])
for x in dss if x.split('-')[0] == field])))
ds = dss[['{}-{}'.format(field, i) for i in nums]]
da = analyse_results_ds_one_station(dss, field=field, plot=False)
fig, axes = plt.subplots(ncols=1, nrows=3, sharex=False, figsize=(16, 10))
for j, ax in enumerate(axes):
start = pd.to_datetime('{}-{}'.format(year, middle_date)
) - pd.Timedelta(zooms[j], unit='D')
end = pd.to_datetime('{}-{}'.format(year, middle_date)
) + pd.Timedelta(zooms[j], unit='D')
daa = da.sel(time=slice(start, end))
        for ppp in ds:
            ds[ppp].plot(ax=ax, linewidth=3.0)
daa.plot.line(marker='.', linewidth=0., ax=ax, color='k')
axes[j].set_xlim(start, end)
axes[j].set_ylim(daa.min() - 0.5, daa.max() + 0.5)
try:
axes[j - 1].axvline(x=start, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
axes[j - 1].axvline(x=end, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
except IndexError:
pass
units = ds.attrs['{}>units'.format(field)]
sta = da.attrs['station']
desc = da.attrs['{}>desc'.format(field)]
ax.set_ylabel('{} [{}]'.format(field, units))
ax.set_xlabel('')
ax.grid()
# fig.suptitle(
# '30 hours stitched {} for GNSS station {}'.format(
# desc, sta), fontweight='bold')
fig.tight_layout()
caption('20, 6 and 1 days of zenith wet delay in 2004 from the TELA GNSS station for the top, middle and bottom figures respectively. The colored segments represent daily solutions while the black dots represent smoothed mean solutions.')
filename = 'zwd_tela_discon_panel.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
# fig.subplots_adjust(top=0.95)
return axes
def plot_figure_3_1(path=work_yuval, data='zwd'):
import xarray as xr
from aux_gps import plot_tmseries_xarray
from PW_stations import load_gipsyx_results
if data == 'zwd':
tela = load_gipsyx_results('tela', sample_rate='1H', plot_fields=None)
label = 'ZWD [cm]'
title = 'Zenith wet delay derived from GPS station TELA'
ax = plot_tmseries_xarray(tela, 'WetZ')
elif data == 'pw':
ds = xr.open_dataset(path / 'GNSS_hourly_PW.nc')
tela = ds['tela']
label = 'PW [mm]'
title = 'Precipitable water derived from GPS station TELA'
ax = plot_tmseries_xarray(tela)
ax.set_ylabel(label)
ax.set_xlim('1996-02', '2019-07')
ax.set_title(title)
ax.set_xlabel('')
ax.figure.tight_layout()
return ax
def plot_ts_tm(path=sound_path, model='TSEN',
times=['2007', '2019'], fontsize=14, save=True):
"""plot ts-tm relashonship"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
from PW_stations import ML_Switcher
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from sounding_procedures import get_field_from_radiosonde
models_dict = {'LR': 'Linear Regression',
'TSEN': 'Theil–Sen Regression'}
# sns.set_style('whitegrid')
pds = xr.Dataset()
Ts = get_field_from_radiosonde(path=sound_path, field='Ts',
data_type='phys', reduce=None, times=times,
plot=False)
Tm = get_field_from_radiosonde(path=sound_path, field='Tm',
data_type='phys', reduce='min', times=times,
plot=False)
pds['Tm'] = Tm
pds['Ts'] = Ts
pds = pds.dropna('sound_time')
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
pds.plot.scatter(
x='Ts',
y='Tm',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.grid()
ml = ML_Switcher()
fit_model = ml.pick_model(model)
X = pds.Ts.values.reshape(-1, 1)
y = pds.Tm.values
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
ax.plot(X, predict, c='r')
bevis_tm = pds.Ts.values * 0.72 + 70.0
ax.plot(pds.Ts.values, bevis_tm, c='purple')
ax.legend(['{} ({:.2f}, {:.2f})'.format(models_dict.get(model),
coef, inter), 'Bevis 1992 et al. (0.72, 70.0)'], fontsize=fontsize-4)
# ax.set_xlabel('Surface Temperature [K]')
# ax.set_ylabel('Water Vapor Mean Atmospheric Temperature [K]')
ax.set_xlabel('Ts [K]', fontsize=fontsize)
ax.set_ylabel('Tm [K]', fontsize=fontsize)
ax.set_ylim(265, 320)
ax.tick_params(labelsize=fontsize)
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = predict - y
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", 'edgecolor': 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(y, predict))
print(rmean, rmse)
r2 = r2_score(y, predict)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[K]')
textstr = '\n'.join(['n={}'.format(pds.Ts.size),
'RMSE: ', '{:.2f} K'.format(rmse)]) # ,
# r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
# axin1.text(0.2, 0.9, 'n={}'.format(pds.Ts.size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.78, 0.9, 'RMSE: {:.2f} K'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
axin1.set_xlim(-15, 15)
fig.tight_layout()
filename = 'Bet_dagan_ts_tm_fit_{}-{}.png'.format(times[0], times[1])
    caption('Water vapor mean temperature (Tm) vs. surface temperature (Ts) of the Bet-Dagan radiosonde station. An ordinary least squares linear fit (red) yields a residual distribution with an RMSE of 4 K. The Bevis (1992) model is plotted (purple) for comparison.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
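# A standalone sketch of the Bevis et al. (1992) Ts -> Tm approximation that
# plot_ts_tm above plots for comparison; the 0.72 and 70.0 coefficients are the
# ones used in that function's legend (Ts and Tm in Kelvin). This helper is not
# used by the plotting routines and is only meant as a worked example.
def _bevis_tm_sketch(ts_kelvin):
    """Return the Bevis (1992) water-vapor weighted mean temperature estimate [K]."""
    return 0.72 * ts_kelvin + 70.0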
def plot_pw_tela_bet_dagan_scatterplot(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
cats=None,
times=['2007', '2019'], wv_name='pw',
r2=False, fontsize=14,
save=True):
"""plot the PW of Bet-Dagan vs. PW of gps station"""
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# sns.set_style('white')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path, sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
tpw = 'tpw_bet_dagan'
ds = ds[[tpw, 'tela_pw']].dropna('time')
ds = ds.sel(time=slice(*times))
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
ds.plot.scatter(x=tpw,
y='tela_pw',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.plot(ds[tpw], ds[tpw], c='r')
ax.legend(['y = x'], loc='upper right', fontsize=fontsize)
if wv_name == 'pw':
ax.set_xlabel('PWV from Bet-Dagan [mm]', fontsize=fontsize)
ax.set_ylabel('PWV from TELA GPS station [mm]', fontsize=fontsize)
elif wv_name == 'iwv':
ax.set_xlabel(
r'IWV from Bet-Dagan station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.set_ylabel(
r'IWV from TELA GPS station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.grid()
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = ds.tela_pw.values - ds[tpw].values
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", "edgecolor": 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(ds[tpw].values, ds.tela_pw.values))
r2s = r2_score(ds[tpw].values, ds.tela_pw.values)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[mm]')
ax.tick_params(labelsize=fontsize)
if wv_name == 'pw':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse)])
elif wv_name == 'iwv':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(rmse)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
#
# axin1.text(0.2, 0.95, 'n={}'.format(ds[tpw].size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.3, 0.85, 'bias: {:.2f} mm'.format(rmean),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.35, 0.75, 'RMSE: {:.2f} mm'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# fig.suptitle('Precipitable Water comparison for the years {} to {}'.format(*times))
fig.tight_layout()
    caption(
        'PW from TELA GNSS station vs. PW from Bet-Dagan radiosonde station in {}-{}. A 45-degree line is plotted (red) for comparison. Note the skew in the residual distribution with an RMSE of 4.37 mm.'.format(times[0], times[1]))
# fig.subplots_adjust(top=0.95)
filename = 'Bet_dagan_tela_pw_compare_{}-{}.png'.format(times[0], times[1])
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ds
def plot_tela_bet_dagan_comparison(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
times=['2007', '2020'], cats=None,
compare='pwv',
save=True):
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
# sns.set_style('whitegrid')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path,
sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
ds = ds.dropna('time')
ds = ds.sel(time=slice(*times))
if compare == 'zwd':
df = ds[['zwd_bet_dagan', 'tela']].to_dataframe()
elif compare == 'pwv':
df = ds[['tpw_bet_dagan', 'tela_pw']].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
df.columns = ['Bet-Dagan soundings', 'TELA GNSS station']
sns.scatterplot(
data=df,
s=20,
ax=axes[0],
style='x',
linewidth=0,
alpha=0.8)
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
df_r = df.iloc[:, 0] - df.iloc[:, 1]
    df_r.name = 'Residual distribution'
sns.scatterplot(
data=df_r,
color='k',
s=20,
ax=axes[1],
linewidth=0,
alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
if compare == 'zwd':
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
elif compare == 'pwv':
axes[0].set_ylabel('Precipitable Water Vapor [mm]')
axes[1].set_ylabel('Residuals [mm]')
# axes[0].set_title('Zenith wet delay from Bet-Dagan radiosonde station and TELA GNSS satation')
sonde_change_x = pd.to_datetime('2013-08-20')
axes[1].axvline(sonde_change_x, color='red')
axes[1].annotate(
'changed sonde type from VIZ MK-II to PTU GPS',
(mdates.date2num(sonde_change_x),
10),
xytext=(
15,
15),
textcoords='offset points',
arrowprops=dict(
arrowstyle='fancy',
color='red'),
color='red')
# axes[1].set_aspect(3)
[x.set_xlim(*[pd.to_datetime(times[0]), pd.to_datetime(times[1])])
for x in axes]
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
filename = 'Bet_dagan_tela_{}_compare.png'.format(compare)
    caption('Top: zenith wet delay from Bet-Dagan radiosonde station (blue circles) and from TELA GNSS station (orange x) in 2007-2019. Bottom: residuals. Note that the residuals become constrained from 08-2013, probably due to an equipment change.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def plot_israel_map_from_shape_file(gis_path=gis_path):
import geopandas as gpd
agr = gpd.read_file(gis_path/'ISR_agriculture_districts.shp')
isr = gpd.GeoSeries(agr.geometry.unary_union)
isr.crs = agr.crs
isr = isr.to_crs(epsg=4326)
return isr
def plot_israel_map(gis_path=gis_path, rc=rc, ticklabelsize=12, ax=None):
"""general nice map for israel, need that to plot stations,
and temperature field on top of it"""
import geopandas as gpd
import contextily as ctx
import seaborn as sns
import cartopy.crs as ccrs
sns.set_style("ticks", rc=rc)
isr_with_yosh = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
# isr_with_yosh = isr_with_yosh.to_crs(epsg=3857)
crs_epsg = ccrs.epsg('3857')
# crs_epsg = ccrs.epsg('2039')
if ax is None:
# fig, ax = plt.subplots(subplot_kw={'projection': crs_epsg},
# figsize=(6, 15))
bounds = isr_with_yosh.geometry.total_bounds
extent = [bounds[0], bounds[2], bounds[1], bounds[3]]
# ax.set_extent([bounds[0], bounds[2], bounds[1], bounds[3]], crs=crs_epsg)
# ax.add_geometries(isr_with_yosh.geometry, crs=crs_epsg)
ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15))
else:
isr_with_yosh.plot(alpha=0.0, ax=ax)
ctx.add_basemap(
ax,
source=ctx.providers.Stamen.TerrainBackground,
crs='epsg:4326')
ax.xaxis.set_major_locator(ticker.MaxNLocator(2))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.yaxis.set_major_formatter(lat_formatter)
ax.xaxis.set_major_formatter(lon_formatter)
ax.tick_params(top=True, bottom=True, left=True, right=True,
direction='out', labelsize=ticklabelsize)
# scale_bar(ax, ccrs.Mercator(), 50, bounds=bounds)
return ax
def plot_israel_with_stations(gis_path=gis_path, dem_path=dem_path, ims=True,
gps=True, radio=True, terrain=True, alt=False,
ims_names=False, gps_final=False, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import geopandas as gpd
ax = plot_israel_map(gis_path)
station_names = []
legend = []
if ims:
print('getting IMS temperature stations metadata...')
ims_t = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims_t.plot(ax=ax, color='red', edgecolor='black', alpha=0.5)
station_names.append('ims')
legend.append('IMS stations')
if ims_names:
geo_annotate(ax, ims_t.lon, ims_t.lat,
ims_t['name_english'], xytext=(3, 3), fmt=None,
c='k', fw='normal', fs=7, colorupdown=False)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
if gps:
print('getting solved GNSS israeli stations metadata...')
gps_df = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
        if gps_final:
            to_drop = ['gilb', 'lhav', 'hrmn', 'nizn', 'spir']
            gps_final_stations = [x for x in gps_df.index if x not in to_drop]
            gps = gps_df.loc[gps_final_stations, :]
        else:
            gps = gps_df
        gps.plot(ax=ax, color='k', edgecolor='black', marker='s')
gps_stations = [x for x in gps.index]
to_plot_offset = ['gilb', 'lhav']
# [gps_stations.remove(x) for x in to_plot_offset]
gps_normal_anno = gps.loc[gps_stations, :]
# gps_offset_anno = gps.loc[to_plot_offset, :]
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
if alt:
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.alt, xytext=(4, -6), fmt='{:.0f}',
c='k', fw='bold', fs=9, colorupdown=False)
# geo_annotate(ax, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('gps')
legend.append('GNSS stations')
if terrain:
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax.set_xlabel('')
ax.set_ylabel('')
if radio: # plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax, color='black', edgecolor='black',
marker='+')
geo_annotate(ax, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('radio')
legend.append('radiosonde')
if legend:
plt.legend(legend, loc='upper left')
plt.tight_layout()
plt.subplots_adjust(bottom=0.05)
if station_names:
station_names = '_'.join(station_names)
else:
station_names = 'no_stations'
filename = 'israel_map_{}.png'.format(station_names)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_zwd_lapse_rate(path=work_yuval, fontsize=18, model='TSEN', save=True):
from PW_stations import calculate_zwd_altitude_fit
df, zwd_lapse_rate = calculate_zwd_altitude_fit(path=path, model=model,
plot=True, fontsize=fontsize)
if save:
filename = 'zwd_lapse_rate.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_ims_T_lapse_rate(ims_path=ims_path, dt='2013-10-19T22:00:00',
fontsize=16, save=True):
from aux_gps import path_glob
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
    import pandas as pd
    import seaborn as sns
# from matplotlib import rc
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt = pd.Series(ts.values, index=T_alts)
ts_vs_alt_for_fit = ts_vs_alt.dropna()
[a, b] = np.polyfit(ts_vs_alt_for_fit.index.values,
ts_vs_alt_for_fit.values, 1)
if lapse_rate == 'auto':
lapse_rate = np.abs(a) * 1000
if lapse_rate < 5.0:
lapse_rate = 5.0
elif lapse_rate > 10.0:
lapse_rate = 10.0
return ts_vs_alt, lapse_rate
# rc('text', usetex=False)
# rc('text',latex.unicode=False)
glob_str = 'IMS_TD_israeli_10mins*.nc'
file = path_glob(ims_path, glob_str=glob_str)[0]
ds = xr.open_dataset(file)
time_dim = list(set(ds.dims))[0]
# slice to a starting year(1996?):
ds = ds.sel({time_dim: slice('1996', None)})
# years = sorted(list(set(ds[time_dim].dt.year.values)))
# get coords and alts of IMS stations:
T_alts = np.array([ds[x].attrs['station_alt'] for x in ds])
# T_lats = np.array([ds[x].attrs['station_lat'] for x in ds])
# T_lons = np.array([ds[x].attrs['station_lon'] for x in ds])
print('loading IMS_TD of israeli stations 10mins freq..')
# transform to dataframe and add coords data to df:
tdf = ds.to_dataframe()
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
dt = pd.to_datetime(dt)
# prepare the ims coords and temp df(Tloc_df) and the lapse rate:
ts_vs_alt, lapse_rate = choose_dt_and_lapse_rate(tdf, dt, T_alts, 'auto')
fig, ax_lapse = plt.subplots(figsize=(10, 6))
sns.regplot(x=ts_vs_alt.index, y=ts_vs_alt.values, color='r',
scatter_kws={'color': 'k'}, ax=ax_lapse)
# suptitle = dt.strftime('%Y-%m-%d %H:%M')
ax_lapse.set_xlabel('Altitude [m]', fontsize=fontsize)
ax_lapse.set_ylabel(r'Temperature [$\degree$C]', fontsize=fontsize)
ax_lapse.text(0.5, 0.95, r'Lapse rate: {:.2f} $\degree$C/km'.format(lapse_rate),
horizontalalignment='center', verticalalignment='center',
fontsize=fontsize,
transform=ax_lapse.transAxes, color='k')
ax_lapse.grid()
ax_lapse.tick_params(labelsize=fontsize)
# ax_lapse.set_title(suptitle, fontsize=14, fontweight='bold')
fig.tight_layout()
filename = 'ims_lapse_rate_example.png'
    caption('Temperature vs. altitude at 10 PM on 2013-10-19 for all automated 10-min IMS stations. The lapse rate is calculated using an ordinary least squares linear fit.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax_lapse
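# A self-contained sketch of the lapse-rate estimate used in
# choose_dt_and_lapse_rate above: fit temperature vs. altitude with a
# first-degree polynomial, convert the slope from degC/m to degC/km and clip
# it to the 5-10 degC/km range, mirroring the 'auto' branch of that helper.
# Not called by the plotting code; inputs are plain sequences of station
# altitudes [m] and temperatures [degC].
def _lapse_rate_sketch(alts_m, temps_c):
    import numpy as np
    slope, _intercept = np.polyfit(np.asarray(alts_m), np.asarray(temps_c), 1)
    return float(np.clip(np.abs(slope) * 1000.0, 5.0, 10.0))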
def plot_figure_9(hydro_path=hydro_path, gis_path=gis_path, pw_anom=False,
max_flow_thresh=None, wv_name='pw', save=True):
from hydro_procedures import get_hydro_near_GNSS
from hydro_procedures import loop_over_gnss_hydro_and_aggregate
import matplotlib.pyplot as plt
df = get_hydro_near_GNSS(
radius=5,
hydro_path=hydro_path,
gis_path=gis_path,
plot=False)
ds = loop_over_gnss_hydro_and_aggregate(df, pw_anom=pw_anom,
max_flow_thresh=max_flow_thresh,
hydro_path=hydro_path,
work_yuval=work_yuval, ndays=3,
plot=False, plot_all=False)
names = [x for x in ds.data_vars]
fig, ax = plt.subplots(figsize=(10, 6))
for name in names:
ds.mean('station').mean('tide_start')[name].plot.line(
marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days before tide event')
ax.grid()
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x, y, z)
for x, y, z in fmt])
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_xticklabels()]
xlabels = [x.replace('−', '') for x in labels]
ax.set_xticklabels(xlabels)
fig.canvas.draw()
if wv_name == 'pw':
if pw_anom:
ax.set_ylabel('PW anomalies [mm]')
else:
ax.set_ylabel('PW [mm]')
elif wv_name == 'iwv':
if pw_anom:
ax.set_ylabel(r'IWV anomalies [kg$\cdot$m$^{-2}$]')
else:
ax.set_ylabel(r'IWV [kg$\cdot$m$^{-2}$]')
fig.tight_layout()
# if pw_anom:
# title = 'Mean PW anomalies for tide stations near all GNSS stations'
# else:
# title = 'Mean PW for tide stations near all GNSS stations'
# if max_flow_thresh is not None:
# title += ' (max_flow > {} m^3/sec)'.format(max_flow_thresh)
# ax.set_title(title)
if pw_anom:
filename = 'hydro_tide_lag_pw_anom.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_anom_max{}.png'.format(
max_flow_thresh)
else:
filename = 'hydro_tide_lag_pw.png'
if max_flow_thresh:
filename = 'hydro_tide_lag_pw_anom_max{}.png'.format(
max_flow_thresh)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def produce_table_1(removed=['hrmn', 'nizn', 'spir'], merged={'klhv': ['klhv', 'lhav'],
'mrav': ['gilb', 'mrav']}, add_location=False,
scope='annual', remove_distance=True):
"""for scope='diurnal' use removed=['hrmn'], add_location=True
and remove_distance=False"""
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
sites = group_sites_to_xarray(upper=False, scope=scope)
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
new = sites.T.values.ravel()
if scope == 'annual':
new = [x for x in new.astype(str) if x != 'nan']
df_gnss = df_gnss.reindex(new)
df_gnss['ID'] = df_gnss.index.str.upper()
pd.options.display.float_format = '{:.2f}'.format
df = df_gnss[['name', 'ID', 'lat', 'lon', 'alt', 'distance']]
df['alt'] = df['alt'].map('{:,.0f}'.format)
df['distance'] = df['distance'].astype(int)
cols = ['GNSS Station name', 'Station ID', 'Latitude [N]',
'Longitude [E]', 'Altitude [m a.s.l]', 'Distance from shore [km]']
df.columns = cols
if scope != 'annual':
df.loc['spir', 'GNSS Station name'] = 'Sapir'
if remove_distance:
df = df.iloc[:, 0:-1]
if add_location:
groups = group_sites_to_xarray(upper=False, scope=scope)
coastal = groups.sel(group='coastal').values
coastal = coastal[~pd.isnull(coastal)]
highland = groups.sel(group='highland').values
highland = highland[~pd.isnull(highland)]
eastern = groups.sel(group='eastern').values
eastern = eastern[~pd.isnull(eastern)]
df.loc[coastal, 'Location'] = 'Coastal'
df.loc[highland, 'Location'] = 'Highland'
df.loc[eastern, 'Location'] = 'Eastern'
if removed is not None:
df = df.loc[[x for x in df.index if x not in removed], :]
if merged is not None:
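        # note: the 'merged' mapping is accepted but station merging is not
        # applied here; the table is returned without printing its LaTeX form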
return df
print(df.to_latex(index=False))
return df
def produce_table_stats(thresh=50, add_location=True, add_height=True):
"""add plot sd to height with se_sd errorbars"""
from PW_stations import produce_pw_statistics
from PW_stations import produce_geo_gnss_solved_stations
import pandas as pd
import xarray as xr
    sites = group_sites_to_xarray(upper=False, scope='annual')
    new = sites.T.values.ravel()
    new = [x for x in new.astype(str) if x != 'nan']
pw_mm = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
pw_mm = pw_mm[new]
df = produce_pw_statistics(
thresh=thresh, resample_to_mm=False, pw_input=pw_mm)
if add_location:
cols = [x for x in df.columns]
cols.insert(1, 'Location')
gr_df = sites.to_dataframe('sites')
location = [gr_df[gr_df == x].dropna().index.values.item()[
1].title() for x in new]
df['Location'] = location
df = df[cols]
if add_height:
cols = [x for x in df.columns]
if add_location:
cols.insert(2, 'Height [m a.s.l]')
else:
cols.insert(1, 'Height [m a.s.l]')
df_gnss = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=False)
# pd.options.display.float_format = '{:.2f}'.format
df['Height [m a.s.l]'] = df_gnss['alt'].map('{:.0f}'.format)
df = df[cols]
print(df.to_latex(index=False))
return df
def plot_pwv_longterm_trend(path=work_yuval, model_name='LR', save=True,
fontsize=16, add_era5=True):
import matplotlib.pyplot as plt
from aux_gps import linear_fit_using_scipy_da_ts
# from PW_stations import ML_Switcher
import xarray as xr
from aux_gps import anomalize_xr
"""TSEN and LR for linear fit"""
# load GNSS Israel:
# pw = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_mean = pw_anoms.to_array('station').mean('station')
pw_std = pw_anoms.to_array('station').std('station')
pw_weights = 1 / pw_anoms.to_array('station').count('station')
# add ERA5:
era5 = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
era5_anoms = anomalize_xr(era5, 'MS', verbose=False)
era5_anoms = era5_anoms.sel(time=slice(
pw_mean.time.min(), pw_mean.time.max()))
era5_mean = era5_anoms.to_array('station').mean('station')
era5_std = era5_anoms.to_array('station').std('station')
# init linear models
# ml = ML_Switcher()
# model = ml.pick_model(model_name)
if add_era5:
fig, ax = plt.subplots(2, 1, figsize=(15, 7.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None, method='curve_fit', weights=pw_weights)
pwln = pw_mean.plot(ax=ax[0], color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax[0], color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_lo.plot.line('r--', ax=ax[0], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax[0].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[0].grid()
ax[0].set_xlabel('')
ax[0].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[0].tick_params(labelsize=fontsize)
trend1, trend_hi1, trend_lo1, slope1, slope_hi1, slope_lo1 = linear_fit_using_scipy_da_ts(era5_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None, method='curve_fit', weights=era5_std)
era5ln = era5_mean.plot(ax=ax[1], color='k', marker='o', linewidth=1.5)
trendln1 = trend1.plot(ax=ax[1], color='r', linewidth=2)
trend_hi1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_lo1.plot.line('r--', ax=ax[1], linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope1, slope_lo1, slope_hi1)
handles = era5ln+trendln1
labels = ['ERA5-mean']
labels.append(trend_label)
ax[1].legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax[1].grid()
ax[1].set_xlabel('')
ax[1].set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax[1].tick_params(labelsize=fontsize)
else:
fig, ax = plt.subplots(1, 1, figsize=(15, 5.5))
trend, trend_hi, trend_lo, slope, slope_hi, slope_lo = linear_fit_using_scipy_da_ts(pw_mean, model=model_name, slope_factor=3650.25,
plot=False, ax=None, units=None)
pwln = pw_mean.plot(ax=ax, color='k', marker='o', linewidth=1.5)
trendln = trend.plot(ax=ax, color='r', linewidth=2)
trend_hi.plot.line('r--', ax=ax, linewidth=1.5)
trend_lo.plot.line('r--', ax=ax, linewidth=1.5)
trend_label = '{} model, slope={:.2f} ({:.2f}, {:.2f}) mm/decade'.format(
model_name, slope, slope_lo, slope_hi)
handles = pwln+trendln
labels = ['PWV-mean']
labels.append(trend_label)
ax.legend(handles=handles, labels=labels, loc='upper left',
fontsize=fontsize)
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV mean anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
fig.suptitle('PWV mean anomalies and linear trend',
fontweight='bold', fontsize=fontsize)
fig.tight_layout()
if save:
filename = 'pwv_mean_trend_{}.png'.format(model_name)
plt.savefig(savefig_path / filename, orientation='portrait')
return ax
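# A sketch of the station-mean anomaly series construction used in
# plot_pwv_longterm_trend above: monthly anomalies per station (via the
# project's anomalize_xr helper) are stacked along a 'station' dimension and
# averaged over that dimension at each time step (NaNs are skipped). The input
# is assumed to be a monthly PWV Dataset with one variable per station.
def _station_mean_anomaly_sketch(pwv_monthly_ds):
    from aux_gps import anomalize_xr
    anoms = anomalize_xr(pwv_monthly_ds, 'MS', verbose=False)
    return anoms.to_array('station').mean('station')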
def plot_trend_filled_pwv_and_era5_barh_plot(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
from PW_stations import process_mkt_from_dataset
import pandas as pd
import seaborn as sns
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
era5 = era5[[x for x in era5 if x in gnss]]
df_gnss = process_mkt_from_dataset(
gnss,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_gnss = add_location_to_GNSS_stations_dataframe(df_gnss)
df_gnss['sig'] = df_gnss['p'].astype(float) <= 0.05
df_era5 = process_mkt_from_dataset(
era5,
alpha=0.95,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_era5 = add_location_to_GNSS_stations_dataframe(df_era5)
df_era5['sig'] = df_era5['p'].astype(float) <= 0.05
df = pd.concat([df_gnss, df_era5], keys=['GNSS', 'ERA5'])
df1 = df.unstack(level=0)
df = df1.stack().reset_index()
df.columns = ['station', '', 'p', 'Tau', 'slope', 'intercept', 'CI_5_low',
'CI_5_high', 'Location', 'sig']
sns.barplot(x="slope", y='station', hue='', data=df[df['sig']])
# df['slope'].unstack(level=0).plot(kind='barh', subplots=False, xerr=1)
return df
def produce_filled_pwv_and_era5_mann_kendall_table(path=work_yuval):
import xarray as xr
from aux_gps import path_glob
file = sorted(
path_glob(path, 'GNSS_PW_monthly_homogenized_filled_*.nc'))[0]
gnss = xr.load_dataset(path / file)
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5 = era5.sel(time=slice(gnss.time.min(), gnss.time.max()))
df = add_comparison_to_mann_kendall_table(gnss, era5, 'GNSS', 'ERA5')
print(df.to_latex(header=False, index=False))
return df
def add_comparison_to_mann_kendall_table(ds1, ds2, name1='GNSS', name2='ERA5',
alpha=0.05):
df1 = produce_table_mann_kendall(ds1, alpha=alpha)
df2 = produce_table_mann_kendall(ds2, alpha=alpha)
df = df1['Site ID'].to_frame()
df[name1+'1'] = df1["Kendall's Tau"]
df[name2+'1'] = df2["Kendall's Tau"]
df[name1+'2'] = df1['P-value']
df[name2+'2'] = df2['P-value']
df[name1+'3'] = df1["Sen's slope"]
df[name2+'3'] = df2["Sen's slope"]
df[name1+'4'] = df1["Percent change"]
df[name2+'4'] = df2["Percent change"]
return df
def produce_table_mann_kendall(pwv_ds, alpha=0.05,
sort_by=['groups_annual', 'lat']):
from PW_stations import process_mkt_from_dataset
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import reduce_tail_xr
import xarray as xr
def table_process_df(df, means):
df_sites = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df_sites.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 0]).index
# calculate percent changes from last decade means:
df['CI95'] = '(' + df['CI_95_low'].map('{:.2f}'.format).astype(
str) + ', ' + df['CI_95_high'].map('{:.2f}'.format).astype(str) + ')'
df['means'] = means
df['Pct_change'] = 100 * df['slope'] / df['means']
Pct_high = 100 * df['CI_95_high'] / df['means']
Pct_low = 100 * df['CI_95_low'] / df['means']
df['Pct_change_CI95'] = '(' + Pct_low.map('{:.2f}'.format).astype(
str) + ', ' + Pct_high.map('{:.2f}'.format).astype(str) + ')'
# df['Temperature change'] = df['Percent change'] / 7.0
df.drop(['means', 'CI_95_low', 'CI_95_high'], axis=1, inplace=True)
# station id is big:
df['id'] = df.index.str.upper()
# , 'Temperature change']]
df = df[['id', 'Tau', 'p', 'slope', 'CI95',
'Pct_change', 'Pct_change_CI95']]
# filter for non significant trends:
# df['slope'] = df['slope'][df['p'] < 0.05]
# df['Pct_change'] = df['Pct_change'][df['p'] < 0.05]
# df['CI95'] = df['CI95'][df['p'] < 0.05]
# df['Pct_change_CI95'] = df['Pct_change_CI95'][df['p'] < 0.05]
# higher and better results:
df.loc[:, 'p'][df['p'] < 0.001] = '<0.001'
df['p'][df['p'] != '<0.001'] = df['p'][df['p'] !=
'<0.001'].astype(float).map('{:,.3f}'.format)
df['Tau'] = df['Tau'].map('{:,.3f}'.format)
df['slope'] = df['slope'].map('{:,.2f}'.format)
df['slope'][df['slope'] == 'nan'] = '-'
df.columns = [
'Site ID',
"Kendall's Tau",
'P-value',
"Sen's slope", "Sen's slope CI 95%",
'Percent change', 'Percent change CI 95%'] # , 'Temperature change']
df['Percent change'] = df['Percent change'].map('{:,.1f}'.format)
df['Percent change'] = df[df["Sen's slope"] != '-']['Percent change']
df['Percent change'] = df['Percent change'].fillna('-')
df["Sen's slope CI 95%"] = df["Sen's slope CI 95%"].fillna(' ')
df['Percent change CI 95%'] = df['Percent change CI 95%'].fillna(' ')
df["Sen's slope"] = df["Sen's slope"].astype(
str) + ' ' + df["Sen's slope CI 95%"].astype(str)
df['Percent change'] = df['Percent change'].astype(
str) + ' ' + df['Percent change CI 95%'].astype(str)
df.drop(['Percent change CI 95%', "Sen's slope CI 95%"],
axis=1, inplace=True)
# df['Temperature change'] = df['Temperature change'].map('{:,.1f}'.format)
# df['Temperature change'] = df[df["Sen's slope"] != '-']['Temperature change']
# df['Temperature change'] = df['Temperature change'].fillna('-')
# last, reindex according to geography:
# gr = group_sites_to_xarray(scope='annual')
# new = [x for x in gr.T.values.ravel() if isinstance(x, str)]
new = [x for x in sites if x in df.index]
df = df.reindex(new)
return df
# if load_data == 'pwv-homo':
# print('loading homogenized (RH) pwv dataset.')
# data = xr.load_dataset(work_yuval /
# 'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# elif load_data == 'pwv-orig':
# print('loading original pwv dataset.')
# data = xr.load_dataset(work_yuval /
# 'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
# elif load_data == 'pwv-era5':
# print('loading era5 pwv dataset.')
# data = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
# if pwv_ds is not None:
# print('loading user-input pwv dataset.')
# data = pwv_ds
df = process_mkt_from_dataset(
pwv_ds,
alpha=alpha,
season_selection=None,
seasonal=False,
factor=120,
anomalize=True, CI=True)
df_mean = reduce_tail_xr(pwv_ds, reduce='mean', records=120,
return_df=True)
table = table_process_df(df, df_mean)
# print(table.to_latex(index=False))
return table
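# Usage sketch for produce_table_mann_kendall, following the dataset names in
# the commented-out loader above (assumes the monthly PWV file exists under
# work_yuval):
# pwv = xr.load_dataset(work_yuval / 'GNSS_PW_monthly_thresh_50.nc')
# mk_table = produce_table_mann_kendall(pwv, alpha=0.05)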
def plot_filled_and_unfilled_pwv_monthly_anomalies(pw_da, anomalize=True,
max_gap=6,
method='cubic',
ax=None):
from aux_gps import anomalize_xr
import matplotlib.pyplot as plt
import numpy as np
if anomalize:
pw_da = anomalize_xr(pw_da, 'MS')
max_gap_td = np.timedelta64(max_gap, 'M')
filled = pw_da.interpolate_na('time', method=method, max_gap=max_gap_td)
if ax is None:
fig, ax = plt.subplots(figsize=(15, 5))
filledln = filled.plot.line('b-', ax=ax)
origln = pw_da.plot.line('r-', ax=ax)
ax.legend(origln + filledln,
['original time series',
'filled using {} interpolation with max gap of {} months'.format(method,
max_gap)])
ax.grid()
ax.set_xlabel('')
ax.set_ylabel('PWV [mm]')
ax.set_title('PWV station {}'.format(pw_da.name.upper()))
return ax
def plot_pwv_statistic_vs_height(pwv_ds, stat='mean', x='alt', season=None,
ax=None, color='b'):
from PW_stations import produce_geo_gnss_solved_stations
import matplotlib.pyplot as plt
from aux_gps import calculate_std_error
import pandas as pd
if season is not None:
print('{} season selected'.format(season))
pwv_ds = pwv_ds.sel(time=pwv_ds['time.season'] == season)
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
if stat == 'mean':
pw_stat = pwv_ds.mean()
pw_stat_error = pwv_ds.map(calculate_std_error, statistic=stat)
elif stat == 'std':
pw_stat = pwv_ds.std()
pw_stat_error = pwv_ds.map(calculate_std_error, statistic=stat)
df[stat] = pd.Series(
pw_stat.to_array(
dim='gnss'),
index=pw_stat.to_array('gnss')['gnss'])
df['{}_error'.format(stat)] = pd.Series(pw_stat_error.to_array(
dim='gnss'), index=pw_stat_error.to_array('gnss')['gnss'])
if ax is None:
fig, ax = plt.subplots()
if x == 'alt':
ax.set_xlabel('Altitude [m a.s.l]')
elif x == 'distance':
ax.set_xlabel('Distance to sea shore [km]')
ax.set_ylabel('{} [mm]'.format(stat))
ax.errorbar(df[x],
df[stat],
df['{}_error'.format(stat)],
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color=color)
if season is not None:
ax.set_title('{} season'.format(season))
ax.grid()
return ax
def add_location_to_GNSS_stations_dataframe(df, scope='annual'):
import pandas as pd
# load location data:
gr = group_sites_to_xarray(scope=scope)
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
return df
def plot_peak_amplitude_altitude_long_term_pwv(path=work_yuval, era5=False,
add_a1a2=True, save=True, fontsize=16):
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from fitting_routines import fit_poly_model_xr
from aux_gps import remove_suffix_from_ds
from PW_stations import produce_geo_gnss_solved_stations
# load alt data, distance etc.,
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
df_geo = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
if era5:
dss = xr.load_dataset(path / 'GNSS_PW_ERA5_harmonics_annual.nc')
else:
dss = xr.load_dataset(path / 'GNSS_PW_harmonics_annual.nc')
dss = dss[[x for x in dss if '_params' in x]]
dss = remove_suffix_from_ds(dss)
df = dss.sel(cpy=1, params='ampl').reset_coords(drop=True).to_dataframe().T
df.columns = ['A1', 'A1std']
df = df.join(dss.sel(cpy=2, params='ampl').reset_coords(drop=True).to_dataframe().T)
    # take the absolute value because the fit sometimes returns a negative sine amplitude:
    df = np.abs(df)
    df.columns = ['A1', 'A1std', 'A2', 'A2std']
df['A2A1'] = df['A2'] / df['A1']
a2a1std = np.sqrt((df['A2std']/df['A1'])**2 + (df['A2']*df['A1std']/df['A1']**2)**2)
df['A2A1std'] = a2a1std
# load location data:
gr = group_sites_to_xarray(scope='annual')
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
# remove nans form mixed nans and str numpy:
new = new[~pd.isnull(new)]
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
df['Location'] = geo
df['alt'] = df_geo['alt']
df = df.set_index('alt')
df = df.sort_index()
cdict = produce_colors_for_pwv_station(scope='annual', as_cat_dict=True)
cdict = dict(zip([x.capitalize() for x in cdict.keys()], cdict.values()))
if add_a1a2:
        fig, axes = plt.subplots(2, 1, sharex=False, figsize=(8, 12))
ax = axes[0]
else:
ax = None
# colors=produce_colors_for_pwv_station(scope='annual')
ax = sns.scatterplot(data=df, y='A1', x='alt', hue='Location',
palette=cdict, ax=ax, s=100, zorder=20)
# ax.legend(prop={'size': fontsize})
x_coords = []
y_coords = []
colors = []
for point_pair in ax.collections:
colors.append(point_pair.get_facecolor())
for x, y in point_pair.get_offsets():
x_coords.append(x)
y_coords.append(y)
ax.errorbar(x_coords, y_coords,
yerr=df['A1std'].values, ecolor=colors[0][:,0:-1],
ls='', capsize=None, fmt=" ")#, zorder=-1)
# linear fit:
x = df.index.values
y = df['A1'].values
p = fit_poly_model_xr(x, y, 1, plot=None, ax=None, return_just_p=True)
fit_label = r'Fitted line, slope: {:.2f} mm$\cdot$km$^{{-1}}$'.format(p[0] * -1000)
fit_poly_model_xr(x,y,1,plot='manual', ax=ax, fit_label=fit_label)
ax.set_ylabel('PWV annual amplitude [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_yticks(np.arange(1, 6, 1))
if add_a1a2:
ax.set_xlabel('')
else:
ax.set_xlabel('GNSS station height [m a.s.l]')
ax.grid(True)
ax.legend(prop={'size': fontsize-3})
if add_a1a2:
# convert to percent:
df['A2A1'] = df['A2A1'].mul(100)
df['A2A1std'] = df['A2A1std'].mul(100)
ax = sns.scatterplot(data=df, y='A2A1', x='alt',
hue='Location', ax=axes[1],
legend=True, palette=cdict,
s=100, zorder=20)
x_coords = []
y_coords = []
colors = []
# ax.legend(prop={'size':fontsize+4}, fontsize=fontsize)
for point_pair in ax.collections:
colors.append(point_pair.get_facecolor())
for x, y in point_pair.get_offsets():
x_coords.append(x)
y_coords.append(y)
ax.errorbar(x_coords, y_coords,
yerr=df['A2A1std'].values, ecolor=colors[0][:,0:-1],
ls='', capsize=None, fmt=" ")#, zorder=-1)
df_upper = df.iloc[9:]
y = df_upper['A2A1'].values
x = df_upper.index.values
p = fit_poly_model_xr(x, y, 1, return_just_p=True)
fit_label = r'Fitted line, slope: {:.1f} %$\cdot$km$^{{-1}}$'.format(p[0] * 1000)
p = fit_poly_model_xr(x, y, 1, plot='manual', ax=ax,
return_just_p=False, color='r',
fit_label=fit_label)
df_lower = df.iloc[:11]
mean = df_lower['A2A1'].mean()
std = df_lower['A2A1'].std()
stderr = std / np.sqrt(len(df_lower))
ci = 1.96 * stderr
ax.hlines(xmin=df_lower.index.min(), xmax=df_lower.index.max(), y=mean,
color='k', label='Mean ratio: {:.1f} %'.format(mean))
ax.fill_between(df_lower.index.values, mean + ci, mean - ci, color="#b9cfe7", edgecolor=None, alpha=0.6)
# y = df_lower['A2A1'].values
# x = df_lower.index.values
# p = fit_poly_model_xr(x, y, 1, return_just_p=True)
# fit_label = 'Linear Fit intercept: {:.2f} %'.format(p[1])
# p = fit_poly_model_xr(x, y, 1, plot='manual', ax=ax,
# return_just_p=False, color='k',
# fit_label=fit_label)
# arrange the legend a bit:
handles, labels = ax.get_legend_handles_labels()
h_stns = handles[1:4]
l_stns = labels[1:4]
h_fits = [handles[0] , handles[-1]]
l_fits = [labels[0], labels[-1]]
ax.legend(handles=h_fits+h_stns, labels=l_fits+l_stns, loc='upper left', prop={'size':fontsize-3})
ax.set_ylabel('PWV semi-annual to annual amplitude ratio [%]', fontsize=fontsize)
ax.set_xlabel('GNSS station height [m a.s.l]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.grid(True)
ax.set_yticks(np.arange(0, 100, 20))
fig.tight_layout()
if save:
filename = 'pwv_peak_amplitude_altitude.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_peak_hour_distance(path=work_yuval, season='JJA',
remove_station='dsea', fontsize=22, save=True):
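    """Plot the diurnal PWV peak hour of each station (for the chosen season)
    against its distance from the sea shore, grouped by coastal/highland/eastern,
    with a linear fit and its R^2."""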
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import groupby_half_hour_xr
from aux_gps import xr_reindex_with_date_range
import xarray as xr
import pandas as pd
import seaborn as sns
import numpy as np
from sklearn.metrics import r2_score
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw[[x for x in pw if '_error' not in x]]
pw.load()
pw = pw.sel(time=pw['time.season'] == season)
pw = pw.map(xr_reindex_with_date_range)
df = groupby_half_hour_xr(pw)
halfs = [df.isel(half_hour=x)['half_hour'] for x in df.argmax().values()]
names = [x for x in df]
dfh = pd.DataFrame(halfs, index=names)
geo = produce_geo_gnss_solved_stations(
add_distance_to_coast=True, plot=False)
geo['phase'] = dfh
geo = geo.dropna()
groups = group_sites_to_xarray(upper=False, scope='diurnal')
geo.loc[groups.sel(group='coastal').values, 'group'] = 'coastal'
geo.loc[groups.sel(group='highland').values, 'group'] = 'highland'
geo.loc[groups.sel(group='eastern').values, 'group'] = 'eastern'
fig, ax = plt.subplots(figsize=(14, 10))
ax.grid()
if remove_station is not None:
removed = geo.loc[remove_station].to_frame().T
geo = geo.drop(remove_station, axis=0)
# lnall = sns.scatterplot(data=geo.loc[only], x='distance', y='phase', ax=ax, hue='group', s=100)
# geo['phase'] = pd.to_timedelta(geo['phase'], unit='H')
coast = geo[geo['group'] == 'coastal']
yerr = 1.0
lncoast = ax.errorbar(x=coast.loc[:,
'distance'],
y=coast.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='b')
# lncoast = ax.scatter(coast.loc[:, 'distance'], coast.loc[:, 'phase'], color='b', s=50)
highland = geo[geo['group'] == 'highland']
# lnhighland = ax.scatter(highland.loc[:, 'distance'], highland.loc[:, 'phase'], color='brown', s=50)
lnhighland = ax.errorbar(x=highland.loc[:,
'distance'],
y=highland.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='brown')
eastern = geo[geo['group'] == 'eastern']
# lneastern = ax.scatter(eastern.loc[:, 'distance'], eastern.loc[:, 'phase'], color='green', s=50)
lneastern = ax.errorbar(x=eastern.loc[:,
'distance'],
y=eastern.loc[:,
'phase'],
yerr=yerr,
marker='o',
ls='',
capsize=2.5,
elinewidth=2.5,
markeredgewidth=2.5,
color='green')
lnremove = ax.scatter(
removed.loc[:, 'distance'], removed.loc[:, 'phase'], marker='x', color='k', s=50)
ax.legend([lncoast,
lnhighland,
lneastern,
lnremove],
['Coastal stations',
'Highland stations',
'Eastern stations',
'DSEA station'],
fontsize=fontsize)
params = np.polyfit(geo['distance'].values, geo.phase.values, 1)
params2 = np.polyfit(geo['distance'].values, geo.phase.values, 2)
x = np.linspace(0, 210, 100)
y = np.polyval(params, x)
y2 = np.polyval(params2, x)
r2 = r2_score(geo.phase.values, np.polyval(params, geo['distance'].values))
ax.plot(x, y, color='k')
textstr = '\n'.join([r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.5, 0.95, textstr, transform=ax.transAxes, fontsize=fontsize,
verticalalignment='top', bbox=props)
# ax.plot(x,y2, color='green')
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel('Distance from shore [km]', fontsize=fontsize)
ax.set_ylabel('Peak hour [UTC]', fontsize=fontsize)
    # add a reference line at 16:40 UTC (~19:40 local summer time):
    ax.axhline(16.66, color='tab:orange', linewidth=2)
    # format the yticks as hours:minutes:
fig.canvas.draw()
labels = [item.get_text() for item in ax.get_yticklabels()]
labels = [pd.to_timedelta(float(x), unit='H') for x in labels]
    labels = ['{}:{:02d}'.format(x.components[1], x.components[2])
              for x in labels]
ax.set_yticklabels(labels)
fig.canvas.draw()
ax.tick_params(axis='both', which='major', labelsize=fontsize)
if save:
filename = 'pw_peak_distance_shore.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_monthly_variability_heatmap_from_pwv_anomalies(load_path=work_yuval,
thresh=50, save=True,
fontsize=16,
sort_by=['groups_annual', 'alt']):
"""sort_by=['group_annual', 'lat'], ascending=[1,0]"""
import xarray as xr
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from calendar import month_abbr
from PW_stations import produce_geo_gnss_solved_stations
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 1]).index
# anoms = xr.load_dataset(
# load_path /
# 'GNSS_PW_monthly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
anoms = xr.load_dataset(
load_path /
'GNSS_PW_monthly_anoms_thresh_{:.0f}.nc'.format(thresh))
df = anoms.groupby('time.month').std().to_dataframe()
# sites = group_sites_to_xarray(upper=True, scope='annual').T
# sites_flat = [x.lower() for x in sites.values.flatten() if isinstance(x, str)]
# df = df[sites_flat]
# cols = [x for x in sites if x in df.columns]
df = df[sites]
df.columns = [x.upper() for x in df.columns]
fig = plt.figure(figsize=(14, 10))
grid = plt.GridSpec(
2, 1, height_ratios=[
2, 1], hspace=0)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.91, 0.37, 0.02, 0.62]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
cmap='Reds',
vmin=df.min().min(),
vmax=df.max().max(),
annot=True,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': 'PWV anomalies STD [mm]'},
annot_kws={'fontsize': fontsize}, xticklabels=False)
cbar_ax.set_ylabel('PWV anomalies STD [mm]', fontsize=fontsize)
cbar_ax.tick_params(labelsize=fontsize)
    # activate top ticks and tick labels:
ax_heat.xaxis.set_tick_params(
bottom='off',
labelbottom='off',
labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=fontsize)
df_mean = df.T.mean()
df_mean = df_mean.to_frame()
df_mean[1] = [month_abbr[x] for x in range(1, 13)]
df_mean.columns = ['std', 'month']
g = sns.barplot(data=df_mean, x='month', y='std', ax=ax_group, palette='Reds',
hue='std', dodge=False, linewidth=2.5)
g.legend_.remove()
ax_group.set_ylabel('PWV anomalies STD [mm]', fontsize=fontsize)
ax_group.grid(color='k', linestyle='--',
linewidth=1.5, alpha=0.5, axis='y')
ax_group.xaxis.set_tick_params(labelsize=fontsize)
ax_group.yaxis.set_tick_params(labelsize=fontsize)
ax_group.set_xlabel('', fontsize=fontsize)
# df.T.mean().plot(ax=ax_group, kind='bar', color='k', fontsize=fontsize, rot=0)
fig.tight_layout()
fig.subplots_adjust(right=0.906)
if save:
filename = 'pw_anoms_monthly_variability_heatmap.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_monthly_means_anomalies_with_station_mean(load_path=work_yuval,
thresh=50, save=True,
anoms=None, agg='mean',
fontsize=16, units=None,
remove_stations=['nizn', 'spir'],
sort_by=['groups_annual', 'lat']):
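    """Plot a heatmap of monthly PWV anomalies per station together with the
    station-aggregated (mean or median) anomaly time series and the number of
    reporting stations on a twin axis."""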
import xarray as xr
import seaborn as sns
from palettable.scientific import diverging as divsci
import numpy as np
import matplotlib.dates as mdates
import pandas as pd
from aux_gps import anomalize_xr
from PW_stations import produce_geo_gnss_solved_stations
sns.set_style('whitegrid')
sns.set_style('ticks')
div_cmap = divsci.Vik_20.mpl_colormap
df = produce_geo_gnss_solved_stations(plot=False,
add_distance_to_coast=True)
sites = df.dropna()[['lat', 'alt', 'distance', 'groups_annual']].sort_values(
by=sort_by, ascending=[1, 0]).index
if anoms is None:
# anoms = xr.load_dataset(
# load_path /
# 'GNSS_PW_monthly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
anoms = xr.load_dataset(
load_path /
'GNSS_PW_monthly_thresh_{:.0f}.nc'.format(thresh))
anoms = anomalize_xr(anoms, 'MS', units=units)
if remove_stations is not None:
anoms = anoms[[x for x in anoms if x not in remove_stations]]
df = anoms.to_dataframe()[:'2019']
# sites = group_sites_to_xarray(upper=True, scope='annual').T
# sites_flat = [x.lower() for x in sites.values.flatten() if isinstance(x, str)]
# df = df[sites_flat]
cols = [x for x in sites if x in df.columns]
df = df[cols]
df.columns = [x.upper() for x in df.columns]
weights = df.count(axis=1).shift(periods=-1, freq='15D').astype(int)
fig = plt.figure(figsize=(20, 10))
grid = plt.GridSpec(
2, 1, height_ratios=[
2, 1], hspace=0.0225)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
cbar_ax = fig.add_axes([0.95, 0.43, 0.0125, 0.45]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
center=0.0,
cmap=div_cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': 'PWV anomalies [mm]'}, xticklabels=False)
cbar_ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize-4)
cbar_ax.tick_params(labelsize=fontsize)
    # activate top ticks and tick labels:
ax_heat.xaxis.set_tick_params(
bottom='off', labelbottom='off', labelsize=fontsize)
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=fontsize)
ax_heat.set_xlabel('')
if agg == 'mean':
ts = df.T.mean().shift(periods=-1, freq='15D')
elif agg == 'median':
ts = df.T.median().shift(periods=-1, freq='15D')
ts.index.name = ''
# dt_as_int = [x for x in range(len(ts.index))]
# xticks_labels = ts.index.strftime('%Y-%m').values[::6]
# xticks = dt_as_int[::6]
# xticks = ts.index
# ts.index = dt_as_int
ts.plot(ax=ax_group, color='k', fontsize=fontsize, lw=2)
barax = ax_group.twinx()
barax.bar(ts.index, weights.values, width=35, color='k', alpha=0.2)
barax.yaxis.set_major_locator(ticker.MaxNLocator(6))
barax.set_ylabel('Stations [#]', fontsize=fontsize-4)
barax.tick_params(labelsize=fontsize)
ax_group.set_xlim(ts.index.min(), ts.index.max() +
pd.Timedelta(15, unit='D'))
ax_group.set_ylabel('PWV {} anomalies [mm]'.format(agg), fontsize=fontsize-4)
# set ticks and align with heatmap axis (move by 0.5):
# ax_group.set_xticks(dt_as_int)
# offset = 1
# ax_group.xaxis.set(ticks=np.arange(offset / 2.,
# max(dt_as_int) + 1 - min(dt_as_int),
# offset),
# ticklabels=dt_as_int)
# move the lines also by 0.5 to align with heatmap:
# lines = ax_group.lines # get the lines
# [x.set_xdata(x.get_xdata() - min(dt_as_int) + 0.5) for x in lines]
# ax_group.xaxis.set(ticks=xticks, ticklabels=xticks_labels)
# ax_group.xaxis.set(ticks=xticks)
years_fmt = mdates.DateFormatter('%Y')
ax_group.xaxis.set_major_locator(mdates.YearLocator())
ax_group.xaxis.set_major_formatter(years_fmt)
ax_group.xaxis.set_minor_locator(mdates.MonthLocator())
# ax_group.xaxis.tick_top()
# ax_group.xaxis.set_ticks_position('both')
# ax_group.tick_params(axis='x', labeltop='off', top='on',
# bottom='on', labelbottom='on')
ax_group.grid()
# ax_group.axvline('2015-09-15')
# ax_group.axhline(2.5)
# plt.setp(ax_group.xaxis.get_majorticklabels(), rotation=45 )
fig.tight_layout()
fig.subplots_adjust(right=0.946)
if save:
filename = 'pw_monthly_means_anomaly_heatmap.png'
plt.savefig(savefig_path / filename, bbox_inches='tight', pad_inches=0.1)
return ts
def plot_grp_anomlay_heatmap(load_path=work_yuval, gis_path=gis_path,
thresh=50, grp='hour', remove_grp=None, season=None,
n_clusters=4, save=True, title=False):
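    """Cluster the grouped (e.g. hourly) PWV anomalies into n_clusters, and plot
    a station heatmap, the weighted cluster means and a map of Israel with the
    stations colored by cluster."""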
import xarray as xr
import seaborn as sns
import numpy as np
from PW_stations import group_anoms_and_cluster
from aux_gps import geo_annotate
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import ListedColormap
from palettable.scientific import diverging as divsci
from PW_stations import produce_geo_gnss_solved_stations
div_cmap = divsci.Vik_20.mpl_colormap
dem_path = load_path / 'AW3D30'
def weighted_average(grp_df, weights_col='weights'):
return grp_df._get_numeric_data().multiply(
grp_df[weights_col], axis=0).sum() / grp_df[weights_col].sum()
df, labels_sorted, weights = group_anoms_and_cluster(
load_path=load_path, thresh=thresh, grp=grp, season=season,
n_clusters=n_clusters, remove_grp=remove_grp)
# create figure and subplots axes:
fig = plt.figure(figsize=(15, 10))
if title:
if season is not None:
fig.suptitle(
'Precipitable water {}ly anomalies analysis for {} season'.format(grp, season))
else:
fig.suptitle('Precipitable water {}ly anomalies analysis (Weighted KMeans {} clusters)'.format(
grp, n_clusters))
grid = plt.GridSpec(
2, 2, width_ratios=[
3, 2], height_ratios=[
4, 1], wspace=0.1, hspace=0)
ax_heat = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_group = fig.add_subplot(grid[1, 0]) # plt.subplot(223)
ax_map = fig.add_subplot(grid[0:, 1]) # plt.subplot(122)
# get the camp and zip it to groups and produce dictionary:
cmap = plt.get_cmap("Accent")
cmap = qualitative_cmap(n_clusters)
# cmap = plt.get_cmap("Set2_r")
# cmap = ListedColormap(cmap.colors[::-1])
groups = list(set(labels_sorted.values()))
palette = dict(zip(groups, [cmap(x) for x in range(len(groups))]))
label_cmap_dict = dict(zip(labels_sorted.keys(),
[palette[x] for x in labels_sorted.values()]))
cm = ListedColormap([x for x in palette.values()])
# plot heatmap and colorbar:
cbar_ax = fig.add_axes([0.57, 0.24, 0.01, 0.69]) # [left, bottom, width,
# height]
ax_heat = sns.heatmap(
df.T,
center=0.0,
cmap=div_cmap,
yticklabels=True,
ax=ax_heat,
cbar_ax=cbar_ax,
cbar_kws={'label': '[mm]'})
    # activate top ticks and tick labels:
ax_heat.xaxis.set_tick_params(top='on', labeltop='on')
# emphasize the yticklabels (stations):
ax_heat.yaxis.set_tick_params(left='on')
ax_heat.set_yticklabels(ax_heat.get_ymajorticklabels(),
fontweight='bold', fontsize=10)
# paint ytick labels with categorical cmap:
boxes = [dict(facecolor=x, boxstyle="square,pad=0.7", alpha=0.6)
for x in label_cmap_dict.values()]
ylabels = [x for x in ax_heat.yaxis.get_ticklabels()]
for label, box in zip(ylabels, boxes):
label.set_bbox(box)
# rotate xtick_labels:
# ax_heat.set_xticklabels(ax_heat.get_xticklabels(), rotation=0,
# fontsize=10)
# plot summed groups (with weights):
df_groups = df.T
df_groups['groups'] = pd.Series(labels_sorted)
df_groups['weights'] = weights
df_groups = df_groups.groupby('groups').apply(weighted_average)
df_groups.drop(['groups', 'weights'], axis=1, inplace=True)
df_groups.T.plot(ax=ax_group, linewidth=2.0, legend=False, cmap=cm)
if grp == 'hour':
ax_group.set_xlabel('hour (UTC)')
ax_group.grid()
group_limit = ax_heat.get_xlim()
ax_group.set_xlim(group_limit)
ax_group.set_ylabel('[mm]')
# set ticks and align with heatmap axis (move by 0.5):
ax_group.set_xticks(df.index.values)
offset = 1
ax_group.xaxis.set(ticks=np.arange(offset / 2.,
max(df.index.values) + 1 -
min(df.index.values),
offset),
ticklabels=df.index.values)
# move the lines also by 0.5 to align with heatmap:
lines = ax_group.lines # get the lines
[x.set_xdata(x.get_xdata() - min(df.index.values) + 0.5) for x in lines]
# plot israel map:
ax_map = plot_israel_map(gis_path=gis_path, ax=ax_map)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
im = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = fig.colorbar(im, ax=ax_map, **cbar_kwargs)
# cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
print('getting solved GNSS israeli stations metadata...')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
gps.index = gps.index.str.upper()
gps = gps.loc[[x for x in df.columns], :]
gps['group'] = pd.Series(labels_sorted)
gps.plot(ax=ax_map, column='group', categorical=True, marker='o',
edgecolor='black', cmap=cm, s=100, legend=True, alpha=1.0,
legend_kwds={'prop': {'size': 10}, 'fontsize': 14,
'loc': 'upper left', 'title': 'clusters'})
# ax_map.set_title('Groupings of {}ly anomalies'.format(grp))
# annotate station names in map:
geo_annotate(ax_map, gps.lon, gps.lat,
gps.index, xytext=(6, 6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
# plt.legend(['IMS stations', 'GNSS stations'],
# prop={'size': 10}, bbox_to_anchor=(-0.15, 1.0),
# title='Stations')
# plt.legend(prop={'size': 10}, loc='upper left')
# plt.tight_layout()
plt.subplots_adjust(top=0.92,
bottom=0.065,
left=0.065,
right=0.915,
hspace=0.19,
wspace=0.215)
filename = 'pw_{}ly_anoms_{}_clusters_with_map.png'.format(grp, n_clusters)
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return df
def plot_lomb_scargle(path=work_yuval, save=True):
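    """Plot a Lomb-Scargle periodogram of the median monthly PWV series."""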
from aux_gps import lomb_scargle_xr
import xarray as xr
pw_mm = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50_homogenized.nc')
pw_mm_median = pw_mm.to_array('station').median('station')
da = lomb_scargle_xr(
pw_mm_median.dropna('time'),
user_freq='MS',
kwargs={
'nyquist_factor': 1,
'samples_per_peak': 100})
plt.ylabel('')
plt.title('Lomb–Scargle periodogram')
plt.xlim([0, 4])
plt.grid()
filename = 'Lomb_scargle_monthly_means.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return da
def plot_vertical_climatology_months(path=sound_path, field='Rho_wv',
center_month=7):
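    """Plot noon and midnight monthly-mean vertical profiles of a Bet-Dagan
    sounding field (default: water-vapor density) for the months around
    center_month."""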
from aux_gps import path_glob
import xarray as xr
ds = xr.open_dataset(
path /
'bet_dagan_phys_sounding_height_2007-2019.nc')[field]
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
day = ds.sel(sound_time=ds['sound_time.hour'] == 12).groupby(
'sound_time.month').mean('sound_time')
night = ds.sel(sound_time=ds['sound_time.hour'] == 00).groupby(
'sound_time.month').mean('sound_time')
next_month = center_month + 1
last_month = center_month - 1
day = day.sel(month=[last_month, center_month, next_month])
night = night.sel(month=[last_month, center_month, next_month])
for month in day.month:
h = day.sel(month=month)['H-Msl'].values
rh = day.sel(month=month).values
ax[0].semilogy(rh, h)
ax[0].set_title('noon')
ax[0].set_ylabel('height [m]')
ax[0].set_xlabel('{}, [{}]'.format(field, day.attrs['units']))
    ax[0].legend(ax[0].lines, [str(x) for x in day.month.values])
for month in night.month:
h = night.sel(month=month)['H-Msl'].values
rh = night.sel(month=month).values
ax[1].semilogy(rh, h)
ax[1].set_title('midnight')
ax[1].set_ylabel('height [m]')
ax[1].set_xlabel('{}, [{}]'.format(field, night.attrs['units']))
    ax[1].legend(ax[1].lines, [str(x) for x in night.month.values])
return day, night
def plot_global_warming_with_pwv_annual(climate_path=climate_path, work_path=work_yuval, fontsize=16):
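    """Plot annual surface air temperature anomalies (a global record and an
    AIRS-based regional mean over Israel) as bars, with the station-mean PWV
    anomalies on a twin axis."""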
import pandas as pd
import xarray as xr
import numpy as np
from aux_gps import anomalize_xr
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style('whitegrid')
sns.set_style('ticks')
df = pd.read_csv(climate_path/'GLB.Ts+dSST_2007.csv',
header=1, na_values='*******')
df = df.iloc[:19, :13]
df = df.melt(id_vars='Year')
df['time'] = pd.to_datetime(df['Year'].astype(
str)+'-'+df['variable'].astype(str))
df = df.set_index('time')
df = df.drop(['Year', 'variable'], axis=1)
df.columns = ['T']
df['T'] = pd.to_numeric(df['T'])
df = df.sort_index()
df.columns = ['AIRS-ST-Global']
# df = df.loc['2003':'2019']
# df = df.resample('AS').mean()
dss = xr.open_dataset(climate_path/'AIRS.2002-2021.L3.RetStd_IR031.v7.0.3.0.nc')
dss = dss.sel(time=slice('2003','2019'), Longitude=slice(34,36), Latitude=slice(34,29))
ds = xr.concat([dss['SurfAirTemp_A'], dss['SurfAirTemp_D']], 'dn')
ds['dn'] = ['day', 'night']
ds = ds.mean('dn')
ds -= ds.sel(time=slice('2007','2016')).mean('time')
anoms = anomalize_xr(ds, 'MS')
anoms = anoms.mean('Latitude').mean('Longitude')
df['AIRS-ST-Regional'] = anoms.to_dataframe('AIRS-ST-Regional')
# else:
# df = pd.read_csv(climate_path/'GLB.Ts+dSST.csv',
# header=1, na_values='***')
# df = df.iloc[:, :13]
# df = df.melt(id_vars='Year')
# df['time'] = pd.to_datetime(df['Year'].astype(
# str)+'-'+df['variable'].astype(str))
# df = df.set_index('time')
# df = df.drop(['Year', 'variable'], axis=1)
# df.columns = ['T']
# # df = df.resample('AS').mean()
# df = df.sort_index()
pw = xr.load_dataset(work_path/'GNSS_PW_monthly_anoms_thresh_50.nc')
# pw_2007_2016_mean = pw.sel(time=slice('2007','2016')).mean()
# pw -= pw_2007_2016_mean
pw = pw.to_array('s').mean('s')
pw_df = pw.to_dataframe('PWV')
# df['pwv'] = pw_df.resample('AS').mean()
df['PWV'] = pw_df
df = df.loc['2003': '2019']
df = df.resample('AS').mean()
fig, ax = plt.subplots(figsize=(15, 6))
ax = df.plot(kind='bar', secondary_y='PWV',
color=['tab:red', 'tab:orange', 'tab:blue'],
ax=ax, legend=False, rot=45)
twin = get_twin(ax, 'x')
align_yaxis_np(ax, twin)
# twin.set_yticks([-0.5, 0, 0.5, 1.0, 1.5])
# locator = ticker.MaxNLocator(6)
# ax.yaxis.set_major_locator(locator)
twin.yaxis.set_major_locator(ticker.MaxNLocator(6))
twin.set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
ax.set_ylabel(r'Surface Temperature anomalies [$\degree$C]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
twin.tick_params(labelsize=fontsize)
ax.set_xticklabels(np.arange(2003, 2020))
ax.grid(True)
# add legend:
handles, labels = [], []
for h, l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
for h, l in zip(*twin.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax.legend(handles, labels, prop={'size': fontsize-2}, loc='upper left')
ax.set_xlabel('')
fig.tight_layout()
return df
def plot_SST_med(sst_path=work_yuval/'SST', fontsize=16, loop=True):
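    """Compute day-of-year SST climatologies for the whole Mediterranean and for
    eastern sub-domains with increasing western boundaries; plot either the day
    of maximum SST per domain (loop=True) or the climatologies themselves."""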
import xarray as xr
import seaborn as sns
from aux_gps import lat_mean
import numpy as np
def clim_mean(med_sst):
sst = med_sst - 273.15
mean_sst = sst.mean('lon')
mean_sst = lat_mean(mean_sst)
mean_sst = mean_sst.groupby('time.dayofyear').mean()
return mean_sst
sns.set_style('whitegrid')
sns.set_style('ticks')
ds = xr.open_dataset(
sst_path/'med1-1981_2020-NCEI-L4_GHRSST-SSTblend-AVHRR_OI-GLOB-v02.0-fv02.0.nc')
sst = ds['analysed_sst'].sel(time=slice('1997', '2019')).load()
whole_med_lon = [-5, 37]
whole_med_lat = [30, 40]
sst_w = sst.copy().sel(lat=slice(*whole_med_lat), lon=slice(*whole_med_lon))
sst_clim_w = clim_mean(sst_w)
df = sst_clim_w.to_dataframe('SST_whole_Med')
# now for emed:
for i, min_lon in enumerate(np.arange(23, 34, 1)):
e_med_lon = [min_lon, 37]
e_med_lat = [30, 40]
sst_e = sst.copy().sel(lat=slice(*e_med_lat), lon=slice(*e_med_lon))
sst_clim_e = clim_mean(sst_e)
df['SST_EMed_{}'.format(min_lon)] = sst_clim_e.to_dataframe()
# df['SST_EMed'] = sst_clim_e.to_dataframe()
if loop:
ax = df.idxmax().plot(kind='barh')
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.set_xlabel('month')
else:
ax = df.plot(lw=2, legend=True)
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(r'Temperature [$^{\circ}$C]', fontsize=fontsize)
ax.set_xlabel('month')
return df
def plot_SST_med_with_PWV_S1_panel(path=work_yuval,
sst_path=work_yuval/'SST',
ims_path=ims_path,
stations=['tela', 'jslm'], fontsize=16, save=True):
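    """Two-panel version of plot_SST_med_with_PWV_first_annual_harmonic for
    several stations (default: TELA and JSLM)."""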
from ims_procedures import gnss_ims_dict
import matplotlib.pyplot as plt
ims_stations = [gnss_ims_dict.get(x) for x in stations]
fig, axes = plt.subplots(1, len(stations), figsize=(15, 6))
for i, (pwv, ims) in enumerate(zip(stations, ims_stations)):
plot_SST_med_with_PWV_first_annual_harmonic(path=work_yuval,
sst_path=sst_path,
ims_path=ims_path,
station=pwv, ims_station=ims,
fontsize=16, ax=axes[i],
save=False)
twin = get_twin(axes[i], 'x')
twin.set_ylim(-4.5, 4.5)
axes[i].set_ylim(8, 30)
fig.tight_layout()
if save:
filename = 'Med_SST_surface_temp_PWV_harmonic_annual_{}_{}.png'.format(
*stations)
plt.savefig(savefig_path / filename, orientation='portrait')
return
def plot_SST_med_with_PWV_first_annual_harmonic(path=work_yuval,
sst_path=work_yuval/'SST',
ims_path=ims_path,
station='tela', ims_station='TEL-AVIV-COAST',
fontsize=16, ax=None,
save=True):
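    """Plot the day-of-year climatologies of IMS surface temperature and
    Mediterranean SST together with the first annual PWV harmonic of one
    station, and annotate the lag (in days) between the two temperature maxima."""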
import xarray as xr
from aux_gps import month_to_doy_dict
import pandas as pd
import numpy as np
from aux_gps import lat_mean
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
sns.set_style('ticks')
# load harmonics:
ds = xr.load_dataset(path/'GNSS_PW_harmonics_annual.nc')
# stns = group_sites_to_xarray(scope='annual').sel(group='coastal').values
# harms = []
# for stn in stns:
# da = ds['{}_mean'.format(stn)].sel(cpy=1)
# harms.append(da)
# harm_da = xr.concat(harms, 'station')
# harm_da['station'] = stns
harm_da = ds['{}_mean'.format(station)].sel(cpy=1).reset_coords(drop=True)
# harm_da = harm_da.reset_coords(drop=True)
harm_da['month'] = [month_to_doy_dict.get(
x) for x in harm_da['month'].values]
harm_da = harm_da.rename({'month': 'dayofyear'})
# df = harm_da.to_dataset('station').to_dataframe()
df = harm_da.to_dataframe(station)
# load surface temperature data:
# da = xr.open_dataset(ims_path/'GNSS_5mins_TD_ALL_1996_2020.nc')[station]
da = xr.open_dataset(ims_path / 'IMS_TD_israeli_10mins.nc')[ims_station]
da.load()
print(da.groupby('time.year').count())
# da += 273.15
da_mean = da.groupby('time.dayofyear').mean()
df['{}_ST'.format(station)] = da_mean.to_dataframe()
# add 366 dayofyear for visualization:
df366 = pd.DataFrame(df.iloc[0].values+0.01).T
df366.index = [366]
df366.columns = df.columns
    df = pd.concat([df, df366])  # DataFrame.append was removed in pandas >= 2.0
ind = np.arange(1, 367)
df = df.reindex(ind)
df = df.interpolate('cubic')
# now load sst for MED
ds = xr.open_dataset(
sst_path/'med1-1981_2020-NCEI-L4_GHRSST-SSTblend-AVHRR_OI-GLOB-v02.0-fv02.0.nc')
sst = ds['analysed_sst'].sel(time=slice('1997', '2019')).load()
# sst_mean = sst.sel(lon=slice(25,35)).mean('lon')
sst -= 273.15
sst_mean = sst.mean('lon')
sst_mean = lat_mean(sst_mean)
sst_clim = sst_mean.groupby('time.dayofyear').mean()
df['Med-SST'] = sst_clim.to_dataframe()
pwv_name = '{} PWV-S1'.format(station.upper())
ims_name = '{} IMS-ST'.format(station.upper())
df.columns = [pwv_name, ims_name, 'Med-SST']
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
# first plot temp:
df[[ims_name, 'Med-SST']].plot(ax=ax, color=['tab:red', 'tab:blue'],
style=['-', '-'], lw=2, legend=False)
ax.set_xticks(np.linspace(0, 365, 13)[:-1])
ax.set_xticklabels(np.arange(1, 13))
ax.grid(True)
ax.tick_params(labelsize=fontsize)
ax.set_ylabel(r'Temperature [$^{\circ}$C]', fontsize=fontsize)
vl = df[[ims_name, 'Med-SST']].idxmax().to_frame('x')
vl['colors'] = ['tab:red', 'tab:blue']
vl['ymin'] = df[[ims_name, 'Med-SST']].min()
vl['ymax'] = df[[ims_name, 'Med-SST']].max()
print(vl)
ax.vlines(x=vl['x'], ymin=vl['ymin'], ymax=vl['ymax'],
colors=vl['colors'], zorder=0)
ax.plot(vl.iloc[0]['x'], vl.iloc[0]['ymax'], color=vl.iloc[0]['colors'],
linewidth=0, marker='o', zorder=15)
ax.plot(vl.iloc[1]['x'], vl.iloc[1]['ymax'], color=vl.iloc[1]['colors'],
linewidth=0, marker='o', zorder=15)
# ax.annotate(text='', xy=(213,15), xytext=(235,15), arrowprops=dict(arrowstyle='<->'), color='k')
# ax.arrow(213, 15, dx=21, dy=0, shape='full', color='k', width=0.25)
#p1 = patches.FancyArrowPatch((213, 15), (235, 15), arrowstyle='<->', mutation_scale=20)
# ax.arrow(217, 15, 16, 0, head_width=0.14, head_length=2,
# linewidth=2, color='k', length_includes_head=True)
# ax.arrow(231, 15, -16, 0, head_width=0.14, head_length=2,
# linewidth=2, color='k', length_includes_head=True)
start = vl.iloc[0]['x'] + 4
end = vl.iloc[1]['x'] - 4
mid = vl['x'].mean()
dy = vl.iloc[1]['x'] - vl.iloc[0]['x'] - 8
days = dy + 8
ax.arrow(start, 15, dy, 0, head_width=0.14, head_length=2,
linewidth=1.5, color='k', length_includes_head=True, zorder=20)
ax.arrow(end, 15, -dy, 0, head_width=0.14, head_length=2,
linewidth=1.5, color='k', length_includes_head=True, zorder=20)
t = ax.text(
mid, 15.8, "{} days".format(days), ha="center", va="center", rotation=0, size=12,
bbox=dict(boxstyle="round4,pad=0.15", fc="white", ec="k", lw=1), zorder=21)
twin = ax.twinx()
df[pwv_name].plot(ax=twin, color='tab:cyan', style='--', lw=2, zorder=0)
twin.set_ylabel('PWV annual anomalies [mm]', fontsize=fontsize)
ax.set_xlabel('month', fontsize=fontsize)
locator = ticker.MaxNLocator(7)
ax.yaxis.set_major_locator(locator)
twin.yaxis.set_major_locator(ticker.MaxNLocator(7))
# add legend:
handles, labels = [], []
for h, l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
for h, l in zip(*twin.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
ax.legend(handles, labels, prop={'size': fontsize-2}, loc='upper left')
# ax.right_ax.set_yticks(np.linspace(ax.right_ax.get_yticks()[0], ax.right_ax.get_yticks()[-1], 7))
twin.vlines(x=df[pwv_name].idxmax(), ymin=df[pwv_name].min(),
ymax=df[pwv_name].max(), colors=['tab:cyan'], ls=['--'], zorder=0)
twin.tick_params(labelsize=fontsize)
# plot points:
twin.plot(df[pwv_name].idxmax(), df[pwv_name].max(),
color='tab:cyan', linewidth=0, marker='o')
# fig.tight_layout()
if save:
filename = 'Med_SST_surface_temp_PWV_harmonic_annual_{}.png'.format(
station)
plt.savefig(savefig_path / filename, orientation='portrait')
return df
def plot_pw_lapse_rate_fit(path=work_yuval, model='TSEN', plot=True):
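    """Fit a linear model (default: Theil-Sen) of mean station PWV vs. altitude
    and return the station altitudes and the PWV lapse rate in mm/km."""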
from PW_stations import produce_geo_gnss_solved_stations
import xarray as xr
from PW_stations import ML_Switcher
import pandas as pd
import matplotlib.pyplot as plt
pw = xr.load_dataset(path / 'GNSS_PW_thresh_50.nc')
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
df_gnss = produce_geo_gnss_solved_stations(plot=False)
df_gnss = df_gnss.loc[[x for x in pw.data_vars], :]
alt = df_gnss['alt'].values
# add mean to anomalies:
pw_new = pw.resample(time='MS').mean()
pw_mean = pw_new.mean('time')
# compute std:
# pw_std = pw_new.std('time')
pw_std = (pw_new.groupby('time.month') -
pw_new.groupby('time.month').mean('time')).std('time')
pw_vals = pw_mean.to_array().to_dataframe(name='pw')
pw_vals = pd.Series(pw_vals.squeeze()).values
pw_std_vals = pw_std.to_array().to_dataframe(name='pw')
pw_std_vals = pd.Series(pw_std_vals.squeeze()).values
ml = ML_Switcher()
fit_model = ml.pick_model(model)
y = pw_vals
X = alt.reshape(-1, 1)
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
pw_lapse_rate = abs(coef)*1000
if plot:
fig, ax = plt.subplots(1, 1, figsize=(16, 4))
ax.errorbar(x=alt, y=pw_vals, yerr=pw_std_vals,
marker='.', ls='', capsize=1.5, elinewidth=1.5,
markeredgewidth=1.5, color='k')
ax.grid()
ax.plot(X, predict, c='r')
ax.set_xlabel('meters a.s.l')
ax.set_ylabel('Precipitable Water [mm]')
ax.legend(['{} ({:.2f} [mm/km], {:.2f} [mm])'.format(model,
pw_lapse_rate, inter)])
return df_gnss['alt'], pw_lapse_rate
def plot_time_series_as_barplot(ts, anoms=False, ts_ontop=None):
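    """Plot a time series (the MEI.v2 ENSO index, per the hard-coded title) as
    bars, colored by sign when anoms=True, with an optional PWV series
    (ts_ontop) on a twin axis."""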
# plt.style.use('fast')
time_dim = list(set(ts.dims))[0]
fig, ax = plt.subplots(figsize=(20, 6), dpi=150)
import matplotlib.dates as mdates
import matplotlib.ticker
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import pandas as pd
if not anoms:
# sns.barplot(x=ts[time_dim].values, y=ts.values, ax=ax, linewidth=5)
ax.bar(ts[time_dim].values, ts.values, linewidth=5, width=0.0,
facecolor='black', edgecolor='black')
# Series.plot.bar(ax=ax, linewidth=0, width=1)
else:
warm = 'tab:orange'
cold = 'tab:blue'
positive = ts.where(ts > 0).dropna(time_dim)
negative = ts.where(ts < 0).dropna(time_dim)
ax.bar(
positive[time_dim].values,
positive.values,
linewidth=3.0,
width=1.0,
facecolor=warm, edgecolor=warm, alpha=1.0)
ax.bar(
negative[time_dim].values,
negative.values,
width=1.0,
linewidth=3.0,
facecolor=cold, edgecolor=cold, alpha=1.0)
if ts_ontop is not None:
ax_twin = ax.twinx()
color = 'red'
ts_ontop.plot.line(color=color, linewidth=2.0, ax=ax_twin)
# we already handled the x-label with ax1
ax_twin.set_ylabel('PW [mm]', color=color)
ax_twin.tick_params(axis='y', labelcolor=color)
ax_twin.legend(['3-month running mean of PW anomalies'])
title_add = ' and the median Precipitable Water anomalies from Israeli GNSS sites'
l2 = ax_twin.get_ylim()
ax.set_ylim(l2)
else:
title_add = ''
ax.grid(None)
ax.set_xlim([pd.to_datetime('1996'), pd.to_datetime('2020')])
ax.set_title('Multivariate ENSO Index Version 2 {}'.format(title_add))
ax.set_ylabel('MEI.v2')
# ax.xaxis.set_major_locator(MultipleLocator(20))
# Change minor ticks to show every 5. (20/4 = 5)
# ax.xaxis.set_minor_locator(AutoMinorLocator(4))
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
ax.xaxis.set_major_locator(mdates.YearLocator(2))
ax.xaxis.set_minor_locator(mdates.YearLocator(1))
ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
ax.figure.autofmt_xdate()
# plt.tick_params(
# axis='x', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# bottom=True, # ticks along the bottom edge are off
# top=False, # ticks along the top edge are off
# labelbottom=True)
# fig.tight_layout()
plt.show()
return
def plot_tide_pw_lags(path=hydro_path, pw_anom=False, rolling='1H', save=True):
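    """Plot the composite mean PWV (or PWV anomalies) in the days around tide
    events for each GNSS site, averaged over the associated hydro stations and
    events, optionally smoothed with a rolling window."""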
from aux_gps import path_glob
import xarray as xr
import numpy as np
file = path_glob(path, 'PW_tide_sites_*.nc')[-1]
if pw_anom:
file = path_glob(path, 'PW_tide_sites_anom_*.nc')[-1]
ds = xr.load_dataset(file)
names = [x for x in ds.data_vars]
fig, ax = plt.subplots(figsize=(8, 6))
for name in names:
da = ds.mean('station').mean('tide_start')[name]
ser = da.to_series()
if rolling is not None:
ser = ser.rolling(rolling).mean()
time = (ser.index / np.timedelta64(1, 'D')).astype(float)
# ser = ser.loc[pd.Timedelta(-2.2,unit='D'):pd.Timedelta(1, unit='D')]
ser.index = time
ser.plot(marker='.', linewidth=0., ax=ax)
ax.set_xlabel('Days around tide event')
if pw_anom:
ax.set_ylabel('PWV anomalies [mm]')
else:
ax.set_ylabel('PWV [mm]')
hstations = [ds[x].attrs['hydro_stations'] for x in ds.data_vars]
events = [ds[x].attrs['total_events'] for x in ds.data_vars]
fmt = list(zip(names, hstations, events))
ax.legend(['{} with {} stations ({} total events)'.format(x.upper(), y, z)
for x, y, z in fmt])
ax.set_xlim([-3, 1])
ax.axvline(0, color='k', linestyle='--')
ax.grid()
filename = 'pw_tide_sites.png'
if pw_anom:
filename = 'pw_tide_sites_anom.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
# ax.xaxis.set_major_locator(mdates.HourLocator(interval=24)) # tick every two hours
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%H'))
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
# title = 'Mean PW for tide stations near all GNSS stations'
# ax.set_title(title)
return
def plot_profiler(path=work_yuval, ceil_path=ceil_path, title=False,
field='maxsnr', save=True):
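    """Plot the July diurnal cycle of CSAR PWV together with the Hadera mixing
    layer height climatology (Levi 2011), using either the max-SNR or the
    Tv-inversion MLH estimate."""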
import xarray as xr
from ceilometers import read_coastal_BL_levi_2011
from aux_gps import groupby_half_hour_xr
from calendar import month_abbr
df = read_coastal_BL_levi_2011(path=ceil_path)
ds = df.to_xarray()
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw['csar']
pw.load()
pw = pw.sel(time=pw['time.month'] == 7).dropna('time')
pw_size = pw.dropna('time').size
pwyears = [pw.time.dt.year.min().item(), pw.time.dt.year.max().item()]
pw_std = groupby_half_hour_xr(pw, reduce='std')['csar']
pw_hour = groupby_half_hour_xr(pw, reduce='mean')['csar']
pw_hour_plus = (pw_hour + pw_std).values
pw_hour_minus = (pw_hour - pw_std).values
if field == 'maxsnr':
mlh_hour = ds['maxsnr']
mlh_std = ds['std_maxsnr']
label = 'Max SNR'
elif field == 'tv_inversion':
mlh_hour = ds['tv_inversion']
mlh_std = ds['std_tv200']
label = 'Tv inversion'
mlh_hour_minus = (mlh_hour - mlh_std).values
mlh_hour_plus = (mlh_hour + mlh_std).values
half_hours = pw_hour.half_hour.values
fig, ax = plt.subplots(figsize=(10, 8))
red = 'tab:red'
blue = 'tab:blue'
pwln = pw_hour.plot(color=blue, marker='s', ax=ax)
ax.fill_between(half_hours, pw_hour_minus,
pw_hour_plus, color=blue, alpha=0.5)
twin = ax.twinx()
mlhln = mlh_hour.plot(color=red, marker='o', ax=twin)
twin.fill_between(half_hours, mlh_hour_minus,
mlh_hour_plus, color=red, alpha=0.5)
pw_label = 'PW: {}-{}, {} ({} pts)'.format(
pwyears[0], pwyears[1], month_abbr[7], pw_size)
mlh_label = 'MLH: {}-{}, {} ({} pts)'.format(1997, 1999, month_abbr[7], 90)
# if month is not None:
# pwmln = pw_m_hour.plot(color='tab:orange', marker='^', ax=ax)
# pwm_label = 'PW: {}-{}, {} ({} pts)'.format(pw_years[0], pw_years[1], month_abbr[month], pw_month.dropna('time').size)
# ax.legend(pwln + mlhln + pwmln, [pw_label, mlh_label, pwm_label], loc=leg_loc)
# else:
ax.legend([pwln[0], mlhln[0]], [pw_label, mlh_label], loc='best')
# plt.legend([pw_label, mlh_label])
ax.tick_params(axis='y', colors=blue)
twin.tick_params(axis='y', colors=red)
ax.set_ylabel('PW [mm]', color=blue)
twin.set_ylabel('MLH [m]', color=red)
twin.set_ylim(400, 1250)
ax.set_xticks([x for x in range(24)])
ax.set_xlabel('Hour of day [UTC]')
ax.grid()
mlh_name = 'Hadera'
textstr = '{}, {}'.format(mlh_name, pw.name.upper())
props = dict(boxstyle='round', facecolor='white', alpha=0.5)
ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=10,
verticalalignment='top', bbox=props)
if title:
ax.set_title('The diurnal cycle of {} Mixing Layer Height ({}) and {} GNSS site PW'.format(
mlh_name, label, pw.name.upper()))
fig.tight_layout()
if save:
filename = 'PW_diurnal_with_MLH_csar_{}.png'.format(field)
plt.savefig(savefig_path / filename, orientation='landscape')
return ax
def plot_ceilometers(path=work_yuval, ceil_path=ceil_path, interpolate='6H',
fontsize=14, save=True):
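    """Plot the diurnal cycles of PWV and ceilometer-derived MLH for the
    TELA/TLV and JSLM/JR station pairs in two shared panels."""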
import xarray as xr
from ceilometers import twin_hourly_mean_plot
from ceilometers import read_all_ceilometer_stations
import numpy as np
pw = xr.open_dataset(path / 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')
pw = pw[['tela', 'jslm', 'yrcm', 'nzrt', 'klhv', 'csar']]
pw.load()
ds = read_all_ceilometer_stations(path=ceil_path)
if interpolate is not None:
attrs = [x.attrs for x in ds.data_vars.values()]
ds = ds.interpolate_na('time', max_gap=interpolate, method='cubic')
for i, da in enumerate(ds):
ds[da].attrs.update(attrs[i])
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(15, 6))
couples = [['tela', 'TLV'], ['jslm', 'JR']]
twins = []
for i, ax in enumerate(axes.flatten()):
ax, twin = twin_hourly_mean_plot(pw[couples[i][0]],
ds[couples[i][1]],
month=None,
ax=ax,
title=False,
leg_loc='best', fontsize=fontsize)
twins.append(twin)
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
twin_ylim_min = min(min([x.get_ylim() for x in twins]))
twin_ylim_max = max(max([x.get_ylim() for x in twins]))
for twin in twins:
twin.set_ylim(twin_ylim_min, twin_ylim_max)
fig.tight_layout()
filename = 'PW_diurnal_with_MLH_tela_jslm.png'
if save:
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fig
def plot_field_with_fill_between(da, dim='hour', mean_dim=None, ax=None,
color='b', marker='s'):
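    """Plot the mean of da along mean_dim as a function of dim, with a +/-1 STD
    band."""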
if dim not in da.dims:
raise KeyError('{} not in {}'.format(dim, da.name))
if mean_dim is None:
mean_dim = [x for x in da.dims if dim not in x][0]
da_mean = da.mean(mean_dim)
da_std = da.std(mean_dim)
da_minus = da_mean - da_std
da_plus = da_mean + da_std
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
line = da_mean.plot(color=color, marker=marker, ax=ax)
ax.fill_between(da_mean[dim], da_minus, da_plus, color=color, alpha=0.5)
return line
def plot_mean_with_fill_between_std(da, grp='hour', mean_dim='time', ax=None,
color='b', marker='s', alpha=0.5):
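    """Group da by grp (e.g. hour of mean_dim) and plot the group mean with a
    +/-1 STD band."""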
da_mean = da.groupby('{}.{}'.format(mean_dim, grp)
).mean('{}'.format(mean_dim))
da_std = da.groupby('{}.{}'.format(mean_dim, grp)
).std('{}'.format(mean_dim))
da_minus = da_mean - da_std
da_plus = da_mean + da_std
if ax is None:
fig, ax = plt.subplots(figsize=(8, 6))
line = da_mean.plot(color=color, marker=marker, ax=ax)
ax.fill_between(da_mean[grp], da_minus, da_plus, color=color, alpha=alpha)
return line
def plot_hist_with_seasons(da_ts):
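    """Plot KDEs of a time series: all data plus each season separately."""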
import seaborn as sns
fig, ax = plt.subplots(figsize=(10, 7))
sns.kdeplot(da_ts.dropna('time'), ax=ax, color='k')
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'DJF').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'MAM').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'JJA').dropna('time'),
legend=False,
ax=ax,
shade=True)
sns.kdeplot(
da_ts.sel(
time=da_ts['time.season'] == 'SON').dropna('time'),
legend=False,
ax=ax,
shade=True)
    plt.legend(['ALL', 'DJF', 'MAM', 'JJA', 'SON'])
return
def plot_diurnal_pw_all_seasons(path=work_yuval, season='ALL', synoptic=None,
fontsize=20, labelsize=18,
ylim=[-2.7, 3.3], save=True, dss=None):
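    """Plot the diurnal PWV cycle per station in geographical panels, either per
    season (season='ALL'), per synoptic class (synoptic='ALL'), or the annual
    cycle only."""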
import xarray as xr
from synoptic_procedures import slice_xr_with_synoptic_class
if dss is None:
gnss_filename = 'GNSS_PW_thresh_50_for_diurnal_analysis_removed_daily.nc'
pw = xr.load_dataset(path / gnss_filename)
else:
pw = dss
df_annual = pw.groupby('time.hour').mean().to_dataframe()
if season is None and synoptic is None:
# plot annual diurnal cycle only:
fg = plot_pw_geographical_segments(df_annual, fg=None, marker='o', color='b',
ylim=ylim)
legend = ['Annual']
elif season == 'ALL' and synoptic is None:
df_jja = pw.sel(time=pw['time.season'] == 'JJA').groupby(
'time.hour').mean().to_dataframe()
df_son = pw.sel(time=pw['time.season'] == 'SON').groupby(
'time.hour').mean().to_dataframe()
df_djf = pw.sel(time=pw['time.season'] == 'DJF').groupby(
'time.hour').mean().to_dataframe()
df_mam = pw.sel(time=pw['time.season'] == 'MAM').groupby(
'time.hour').mean().to_dataframe()
fg = plot_pw_geographical_segments(
df_jja,
fg=None,
marker='s',
color='tab:green',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=0, label='JJA')
fg = plot_pw_geographical_segments(
df_son,
fg=fg,
marker='^',
color='tab:red',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=1, label='SON')
fg = plot_pw_geographical_segments(
df_djf,
fg=fg,
marker='x',
color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, zorder=2, label='DJF')
fg = plot_pw_geographical_segments(
df_mam,
fg=fg,
marker='+',
color='tab:orange',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=4, label='MAM')
fg = plot_pw_geographical_segments(df_annual, fg=fg, marker='d',
color='tab:purple', ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=3,
label='Annual')
elif season is None and synoptic == 'ALL':
df_pt = slice_xr_with_synoptic_class(
pw, path=path, syn_class='PT').groupby('time.hour').mean().to_dataframe()
df_rst = slice_xr_with_synoptic_class(
pw, path=path, syn_class='RST').groupby('time.hour').mean().to_dataframe()
df_cl = slice_xr_with_synoptic_class(
pw, path=path, syn_class='CL').groupby('time.hour').mean().to_dataframe()
df_h = slice_xr_with_synoptic_class(
pw, path=path, syn_class='H').groupby('time.hour').mean().to_dataframe()
fg = plot_pw_geographical_segments(
df_pt,
fg=None,
marker='s',
color='tab:green',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=0, label='PT')
fg = plot_pw_geographical_segments(
df_rst,
fg=fg,
marker='^',
color='tab:red',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=1, label='RST')
fg = plot_pw_geographical_segments(
df_cl,
fg=fg,
marker='x',
color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, zorder=2, label='CL')
fg = plot_pw_geographical_segments(
df_h,
fg=fg,
marker='+',
color='tab:orange',
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=4, label='H')
fg = plot_pw_geographical_segments(df_annual, fg=fg, marker='d',
color='tab:purple', ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, zorder=3,
label='Annual')
sites = group_sites_to_xarray(False, scope='diurnal')
for i, (ax, site) in enumerate(zip(fg.axes.flatten(), sites.values.flatten())):
lns = ax.get_lines()
if site in ['yrcm']:
leg_loc = 'upper right'
elif site in ['nrif', 'elat']:
leg_loc = 'upper center'
elif site in ['ramo']:
leg_loc = 'lower center'
else:
leg_loc = None
# do legend for each panel:
# ax.legend(
# lns,
# legend,
# prop={
# 'size': 12},
# framealpha=0.5,
# fancybox=True,
# ncol=2,
# loc=leg_loc, fontsize=12)
lines_labels = [ax.get_legend_handles_labels() for ax in fg.fig.axes][0]
# lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
fg.fig.legend(lines_labels[0], lines_labels[1], prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
filename = 'pw_diurnal_geo_{}.png'.format(season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_climate_classification(path=climate_path, gis_path=gis_path,
fontsize=16):
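    """Plot the Köppen-Geiger climate classification raster (Beck et al.) over a
    map of Israel and overlay the GNSS station locations."""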
import xarray as xr
from climate_works import read_climate_classification_legend
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
from matplotlib import colors
ras = xr.open_rasterio(path / 'Beck_KG_V1_present_0p0083.tif')
ds = ras.isel(band=0)
minx = 34.0
miny = 29.0
maxx = 36.5
maxy = 34.0
ds = ds.sortby('y')
ds = ds.sel(x=slice(minx, maxx), y=slice(miny, maxy))
ds = ds.astype(int)
ds = ds.reset_coords(drop=True)
ax_map = plot_israel_map(
gis_path=gis_path,
ax=None,
ticklabelsize=fontsize)
df = read_climate_classification_legend(path)
# get color pixels to dict:
d = df['color'].to_dict()
sort_idx = np.argsort([x for x in d.keys()])
idx = np.searchsorted([x for x in d.keys()], ds.values, sorter=sort_idx)
out = np.asarray([x for x in d.values()])[sort_idx][idx]
ds_as_color = xr.DataArray(out, dims=['y', 'x', 'c'])
ds_as_color['y'] = ds['y']
ds_as_color['x'] = ds['x']
ds_as_color['c'] = ['R', 'G', 'B']
# overlay with dem data:
# cmap = plt.get_cmap('terrain', 41)
# df_gnss = produce_geo_gnss_solved_stations(plot=False)
# c_colors = df.set_index('class_code').loc[df_gnss['code'].unique()]['color'].values
c_colors = df['color'].values
c_li = [c for c in c_colors]
c_colors = np.asarray(c_li)
c_colors = np.unique(ds_as_color.stack(coor=['x', 'y']).T.values, axis=0)
# remove black:
# c_colors = c_colors[:-1]
int_code = np.unique(ds.stack(coor=['x', 'y']).T.values, axis=0)
ticks = [df.loc[x]['class_code'] for x in int_code[1:]]
cc = [df.set_index('class_code').loc[x]['color'] for x in ticks]
cc_as_hex = [colors.rgb2hex(x) for x in cc]
tickd = dict(zip(cc_as_hex, ticks))
# ticks.append('Water')
# ticks.reverse()
bounds = [x for x in range(len(c_colors) + 1)]
chex = [colors.rgb2hex(x) for x in c_colors]
ticks = [tickd.get(x, 'Water') for x in chex]
cmap = colors.ListedColormap(chex)
norm = colors.BoundaryNorm(bounds, cmap.N)
# vmin = ds_as_color.min().item()
# vmax = ds_as_color.max().item()
im = ds_as_color.plot.imshow(
ax=ax_map,
alpha=.7,
add_colorbar=False,
cmap=cmap,
interpolation='antialiased',
origin='lower',
norm=norm)
# colours = im.cmap(im.norm(np.unique(ds_as_color)))
# chex = [colors.rgb2hex(x) for x in colours]
# cmap = colors.ListedColormap(chex)
# bounds=[x for x in range(len(colours))]
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(
im,
boundaries=bounds,
ticks=None,
ax=ax_map,
**cbar_kwargs)
cb.set_label(
label='climate classification',
size=fontsize,
weight='normal')
n = len(c_colors)
tick_locs = (np.arange(n) + 0.5) * (n) / n
cb.set_ticks(tick_locs)
# set tick labels (as before)
cb.set_ticklabels(ticks)
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
# now for the gps stations:
gps = produce_geo_gnss_solved_stations(plot=False)
removed = ['hrmn', 'gilb', 'lhav', 'nizn', 'spir']
removed = []
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
gps_stations = gps_list
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
return
def group_sites_to_xarray(upper=False, scope='diurnal'):
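    """Return the GNSS site names as a (site x group) DataArray with the groups
    'coastal', 'highland' and 'eastern' (upper- or lower-case names)."""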
import xarray as xr
import numpy as np
if scope == 'diurnal':
group1 = ['KABR', 'BSHM', 'CSAR', 'TELA', 'ALON', 'SLOM', 'NIZN']
group2 = ['NZRT', 'MRAV', 'YOSH', 'JSLM', 'KLHV', 'YRCM', 'RAMO']
group3 = ['ELRO', 'KATZ', 'DRAG', 'DSEA', 'SPIR', 'NRIF', 'ELAT']
elif scope == 'annual':
group1 = ['KABR', 'BSHM', 'CSAR', 'TELA', 'ALON', 'SLOM', 'NIZN']
group2 = ['NZRT', 'MRAV', 'YOSH', 'JSLM', 'KLHV', 'YRCM', 'RAMO']
group3 = ['ELRO', 'KATZ', 'DRAG', 'DSEA', 'SPIR', 'NRIF', 'ELAT']
if not upper:
group1 = [x.lower() for x in group1]
group2 = [x.lower() for x in group2]
group3 = [x.lower() for x in group3]
gr1 = xr.DataArray(group1, dims='GNSS')
gr2 = xr.DataArray(group2, dims='GNSS')
gr3 = xr.DataArray(group3, dims='GNSS')
gr1['GNSS'] = np.arange(0, len(gr1))
gr2['GNSS'] = np.arange(0, len(gr2))
gr3['GNSS'] = np.arange(0, len(gr3))
sites = xr.concat([gr1, gr2, gr3], 'group').T
sites['group'] = ['coastal', 'highland', 'eastern']
return sites
# def plot_diurnal_pw_geographical_segments(df, fg=None, marker='o', color='b',
# ylim=[-2, 3]):
# import xarray as xr
# import numpy as np
# from matplotlib.ticker import MultipleLocator
# from PW_stations import produce_geo_gnss_solved_stations
# geo = produce_geo_gnss_solved_stations(plot=False)
# sites = group_sites_to_xarray(upper=False, scope='diurnal')
# sites_flat = [x for x in sites.values.flatten() if isinstance(x, str)]
# da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
# da['GNSS'] = [x for x in range(len(da))]
# if fg is None:
# fg = xr.plot.FacetGrid(
# da,
# col='GNSS',
# col_wrap=3,
# sharex=False,
# sharey=False, figsize=(20, 20))
# for i in range(fg.axes.shape[0]): # i is rows
# for j in range(fg.axes.shape[1]): # j is cols
# try:
# site = sites.values[i, j]
# ax = fg.axes[i, j]
# df.loc[:, site].plot(ax=ax, marker=marker, color=color)
# ax.set_xlabel('Hour of day [UTC]')
# ax.yaxis.tick_left()
# ax.grid()
# ax.spines["top"].set_visible(False)
# ax.spines["right"].set_visible(False)
# ax.spines["bottom"].set_visible(False)
# ax.xaxis.set_ticks(np.arange(0, 23, 3))
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
## ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# site_label = '{} ({:.0f})'.format(site.upper(), geo.loc[site].alt)
# ax.text(.12, .85, site_label,
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# ax.yaxis.set_minor_locator(MultipleLocator(3))
# ax.yaxis.grid(
# True,
# which='minor',
# linestyle='--',
# linewidth=1,
# alpha=0.7)
## ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
# if ylim is not None:
# ax.set_ylim(*ylim)
# except KeyError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 0]):
# try:
## df[gr1].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 1]):
# try:
## df[gr2].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 2]):
# try:
## df[gr3].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
#
# fg.fig.tight_layout()
# fg.fig.subplots_adjust()
# return fg
def prepare_reanalysis_monthly_pwv_to_dataframe(path=work_yuval, re='era5',
ds=None):
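    """Concatenate the monthly GNSS PWV with a reanalysis PWV dataset (ERA5 or
    UERRA-HARMONIE, or a user-supplied ds) into a long-format dataframe with
    'month' and 'source' columns."""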
import xarray as xr
import pandas as pd
if re == 'era5':
reanalysis = xr.load_dataset(work_yuval / 'GNSS_era5_monthly_PW.nc')
re_name = 'ERA5'
elif re == 'uerra':
reanalysis = xr.load_dataset(work_yuval / 'GNSS_uerra_monthly_PW.nc')
re_name = 'UERRA-HARMONIE'
elif re is not None and ds is not None:
reanalysis = ds
re_name = re
df_re = reanalysis.to_dataframe()
df_re['month'] = df_re.index.month
pw_mm = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_50_homogenized.nc')
df = pw_mm.to_dataframe()
df['month'] = df.index.month
# concat:
dff = pd.concat([df, df_re], keys=['GNSS', re_name])
dff['source'] = dff.index.get_level_values(0)
dff = dff.reset_index()
return dff
def plot_long_term_era5_comparison(path=work_yuval, era5_path=era5_path,
fontsize=16,
remove_stations=['nizn', 'spir'], save=True):
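    """Plot the station-averaged monthly PWV anomalies against the ERA5 regional
    mean (plotted 5 mm lower for clarity) and report their correlation."""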
import xarray as xr
from aux_gps import anomalize_xr
# from aeronet_analysis import prepare_station_to_pw_comparison
# from PW_stations import ML_Switcher
# from aux_gps import get_julian_dates_from_da
# from scipy.stats.mstats import theilslopes
# TODO: add merra2, 3 panel plot and trend
# load GNSS Israel:
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_percent = anomalize_xr(pw, 'MS', verbose=False, units='%')
pw_percent = pw_percent.to_array('station').mean('station')
pw_mean = pw_anoms.to_array('station').mean('station')
pw_mean = pw_mean.sel(time=slice('1998', '2019'))
# load ERA5:
era5 = xr.load_dataset(path / 'GNSS_era5_monthly_PW.nc')
era5_anoms = anomalize_xr(era5, 'MS', verbose=False)
era5_mean = era5_anoms.to_array('station').mean('station')
df = pw_mean.to_dataframe(name='GNSS')
# load MERRA2:
# merra2 = xr.load_dataset(
# path / 'MERRA2/MERRA2_TQV_israel_area_1995-2019.nc')['TQV']
# merra2_mm = merra2.resample(time='MS').mean()
# merra2_anoms = anomalize_xr(
# merra2_mm, time_dim='time', freq='MS', verbose=False)
# merra2_mean = merra2_anoms.mean('lat').mean('lon')
# load AERONET:
# if aero_path is not None:
# aero = prepare_station_to_pw_comparison(path=aero_path, gis_path=gis_path,
# station='boker', mm_anoms=True)
# df['AERONET'] = aero.to_dataframe()
era5_to_plot = era5_mean - 5
# merra2_to_plot = merra2_mean - 10
df['ERA5'] = era5_mean.to_dataframe(name='ERA5')
# df['MERRA2'] = merra2_mean.to_dataframe('MERRA2')
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
# df['GNSS'].plot(ax=ax, color='k')
# df['ERA5'].plot(ax=ax, color='r')
# df['AERONET'].plot(ax=ax, color='b')
pwln = pw_mean.plot.line('k-', marker='o', ax=ax,
linewidth=2, markersize=3.5)
era5ln = era5_to_plot.plot.line(
'k--', marker='s', ax=ax, linewidth=2, markersize=3.5)
# merra2ln = merra2_to_plot.plot.line(
# 'g-', marker='d', ax=ax, linewidth=2, markersize=2.5)
era5corr = df.corr().loc['GNSS', 'ERA5']
# merra2corr = df.corr().loc['GNSS', 'MERRA2']
handles = pwln + era5ln # + merra2ln
# labels = ['GNSS', 'ERA5, r={:.2f}'.format(
# era5corr), 'MERRA2, r={:.2f}'.format(merra2corr)]
labels = ['GNSS station average', 'ERA5 regional mean, r={:.2f}'.format(
era5corr)]
ax.legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
# if aero_path is not None:
# aeroln = aero.plot.line('b-.', ax=ax, alpha=0.8)
# aerocorr = df.corr().loc['GNSS', 'AERONET']
# aero_label = 'AERONET, r={:.2f}'.format(aerocorr)
# handles += aeroln
ax.set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
ax.tick_params(labelsize=fontsize)
ax.set_xlabel('')
ax.grid()
ax = fix_time_axis_ticks(ax, limits=['1998-01', '2020-01'])
fig.tight_layout()
if save:
filename = 'pwv_long_term_anomalies_era5_comparison.png'
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_long_term_anomalies_with_trends(path=work_yuval,
model_name='TSEN',
fontsize=16,
remove_stations=['nizn', 'spir'],
save=True,
add_percent=False): # ,aero_path=aero_path):
import xarray as xr
from aux_gps import anomalize_xr
from PW_stations import mann_kendall_trend_analysis
from aux_gps import linear_fit_using_scipy_da_ts
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
pw = xr.load_dataset(
path / 'GNSS_PW_monthly_thresh_50.nc').sel(time=slice('1998', None))
if remove_stations is not None:
pw = pw[[x for x in pw if x not in remove_stations]]
pw_anoms = anomalize_xr(pw, 'MS', verbose=False)
pw_percent = anomalize_xr(pw, 'MS', verbose=False, units='%')
pw_percent = pw_percent.to_array('station').mean('station')
pw_mean = pw_anoms.to_array('station').mean('station')
pw_mean = pw_mean.sel(time=slice('1998', '2019'))
if add_percent:
fig, axes = plt.subplots(2, 1, figsize=(16, 10))
else:
fig, ax = plt.subplots(1, 1, figsize=(16, 5))
axes = [ax, ax]
pwln = pw_mean.plot.line('k-', marker='o', ax=axes[0],
linewidth=2, markersize=5.5)
handles = pwln
labels = ['GNSS station average']
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_mean, model=model_name, slope_factor=3652.5, plot=False)
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
mann_pval = mann_kendall_trend_analysis(pw_mean).loc['p']
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) mm$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trendln = trend.plot(ax=axes[0], color='b', linewidth=2, alpha=1)
handles += trendln
trend_hi.plot.line('b--', ax=axes[0], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('b--', ax=axes[0], linewidth=1.5, alpha=0.8)
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_mean.sel(time=slice('2010', '2019')), model=model_name, slope_factor=3652.5, plot=False)
mann_pval = mann_kendall_trend_analysis(pw_mean.sel(time=slice('2010','2019'))).loc['p']
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
trendln = trend.plot(ax=axes[0], color='r', linewidth=2, alpha=1)
handles += trendln
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) mm$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trend_hi.plot.line('r--', ax=axes[0], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('r--', ax=axes[0], linewidth=1.5, alpha=0.8)
# ax.grid()
# ax.set_xlabel('')
# ax.set_ylabel('PWV mean anomalies [mm]')
# ax.legend(labels=[],handles=[trendln[0]])
# fig.tight_layout()
axes[0].legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].set_xlabel('')
axes[0].grid(True)
axes[0] = fix_time_axis_ticks(axes[0], limits=['1998-01', '2020-01'])
if add_percent:
pwln = pw_percent.plot.line('k-', marker='o', ax=axes[1],
linewidth=2, markersize=5.5)
handles = pwln
labels = ['GNSS station average']
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_percent, model=model_name, slope_factor=3652.5, plot=False)
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
mann_pval = mann_kendall_trend_analysis(pw_percent).loc['p']
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) %$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trendln = trend.plot(ax=axes[1], color='b', linewidth=2, alpha=1)
handles += trendln
trend_hi.plot.line('b--', ax=axes[1], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('b--', ax=axes[1], linewidth=1.5, alpha=0.8)
pwv_trends, trend_dict = linear_fit_using_scipy_da_ts(
pw_percent.sel(time=slice('2010', '2019')), model=model_name, slope_factor=3652.5, plot=False)
mann_pval = mann_kendall_trend_analysis(pw_percent.sel(time=slice('2010','2019'))).loc['p']
trend = pwv_trends['trend']
trend_hi = pwv_trends['trend_hi']
trend_lo = pwv_trends['trend_lo']
slope_hi = trend_dict['slope_hi']
slope_lo = trend_dict['slope_lo']
slope = trend_dict['slope']
trendln = trend.plot(ax=axes[1], color='r', linewidth=2, alpha=1)
handles += trendln
trend_label = r'{} model, slope={:.2f} ({:.2f}, {:.2f}) %$\cdot$decade$^{{-1}}$, pvalue={:.4f}'.format(
model_name, slope, slope_lo, slope_hi, mann_pval)
labels.append(trend_label)
trend_hi.plot.line('r--', ax=axes[1], linewidth=1.5, alpha=0.8)
trend_lo.plot.line('r--', ax=axes[1], linewidth=1.5, alpha=0.8)
# ax.grid()
# ax.set_xlabel('')
# ax.set_ylabel('PWV mean anomalies [mm]')
# ax.legend(labels=[],handles=[trendln[0]])
# fig.tight_layout()
axes[1].legend(handles=handles, labels=labels, loc='upper left',
prop={'size': fontsize-2})
axes[1].set_ylabel('PWV anomalies [%]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
axes[1].grid()
axes[1] = fix_time_axis_ticks(axes[1], limits=['1998-01', '2020-01'])
fig.tight_layout()
if save:
filename = 'pwv_station_averaged_trends.png'
plt.savefig(savefig_path / filename, orientation='portrait')
return fig
def plot_day_night_pwv_monthly_mean_std_heatmap(
path=work_yuval, day_time=['09:00', '15:00'], night_time=['17:00', '21:00'], compare=['day', 'std']):
import xarray as xr
import seaborn as sns
import matplotlib.pyplot as plt
pw = xr.load_dataset(work_yuval / 'GNSS_PW_thresh_50_homogenized.nc')
pw = pw[[x for x in pw if 'error' not in x]]
df = pw.to_dataframe()
sites = group_sites_to_xarray(upper=False, scope='annual')
coast = [x for x in sites.sel(group='coastal').dropna('GNSS').values]
high = [x for x in sites.sel(group='highland').dropna('GNSS').values]
east = [x for x in sites.sel(group='eastern').dropna('GNSS').values]
box_coast = dict(facecolor='cyan', pad=0.05, alpha=0.4)
box_high = dict(facecolor='green', pad=0.05, alpha=0.4)
box_east = dict(facecolor='yellow', pad=0.05, alpha=0.4)
color_dict = [{x: box_coast} for x in coast]
color_dict += [{x: box_high} for x in high]
color_dict += [{x: box_east} for x in east]
color_dict = dict((key, d[key]) for d in color_dict for key in d)
sites = sites.T.values.ravel()
sites_flat = [x for x in sites if isinstance(x, str)]
df = df[sites_flat]
df_mm = df.resample('MS').mean()
df_mm_mean = df_mm.groupby(df_mm.index.month).mean()
df_mm_std = df_mm.groupby(df_mm.index.month).std()
df_day = df.between_time(*day_time)
df_night = df.between_time(*night_time)
df_day_mm = df_day.resample('MS').mean()
df_night_mm = df_night.resample('MS').mean()
day_std = df_day_mm.groupby(df_day_mm.index.month).std()
night_std = df_night_mm.groupby(df_night_mm.index.month).std()
day_mean = df_day_mm.groupby(df_day_mm.index.month).mean()
night_mean = df_night_mm.groupby(df_night_mm.index.month).mean()
per_day_std = 100 * (day_std - df_mm_std) / df_mm_std
per_day_mean = 100 * (day_mean - df_mm_mean) / df_mm_mean
per_night_std = 100 * (night_std - df_mm_std) / df_mm_std
per_night_mean = 100 * (night_mean - df_mm_mean) / df_mm_mean
day_night = compare[0]
mean_std = compare[1]
fig, axes = plt.subplots(
1, 2, sharex=False, sharey=False, figsize=(17, 10))
cbar_ax = fig.add_axes([.91, .3, .03, .4])
if compare[1] == 'std':
all_heat = df_mm_std.T
day_heat = day_std.T
title = 'STD'
elif compare[1] == 'mean':
all_heat = df_mm_mean.T
day_heat = day_mean.T
title = 'MEAN'
vmax = max(day_heat.max().max(), all_heat.max().max())
vmin = min(day_heat.min().min(), all_heat.min().min())
sns.heatmap(all_heat, ax=axes[0], cbar=False, vmin=vmin, vmax=vmax,
annot=True, cbar_ax=None, cmap='Reds')
sns.heatmap(day_heat, ax=axes[1], cbar=True, vmin=vmin, vmax=vmax,
annot=True, cbar_ax=cbar_ax, cmap='Reds')
labels_1 = [x for x in axes[0].yaxis.get_ticklabels()]
[label.set_bbox(color_dict[label.get_text()]) for label in labels_1]
labels_2 = [x for x in axes[1].yaxis.get_ticklabels()]
[label.set_bbox(color_dict[label.get_text()]) for label in labels_2]
axes[0].set_title('All {} in mm'.format(title))
axes[1].set_title('Day only ({}-{}) {} in mm'.format(*day_time, title))
[ax.set_xlabel('month') for ax in axes]
fig.tight_layout(rect=[0, 0, .9, 1])
# fig, axes = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(df_mm_mean.T, annot=True, ax=axes[0])
# ax_mean.set_title('All mean in mm')
# ax_std = sns.heatmap(df_mm_std.T, annot=True, ax=axes[1])
# ax_std.set_title('All std in mm')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes]
# fig.tight_layout()
# fig_day, axes_day = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(per_day_mean.T, annot=True, cmap='bwr', center=0, ax=axes_day[0])
# ax_mean.set_title('Day mean - All mean in % from All mean')
# ax_std = sns.heatmap(per_day_std.T, annot=True, cmap='bwr', center=0, ax=axes_day[1])
# ax_std.set_title('Day std - All std in % from All std')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes_day]
# fig_day.tight_layout()
# fig_night, axes_night = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(17, 10))
# ax_mean = sns.heatmap(per_night_mean.T, annot=True, cmap='bwr', center=0, ax=axes_night[0])
# ax_mean.set_title('Night mean - All mean in % from All mean')
# ax_std = sns.heatmap(per_night_std.T, annot=True, cmap='bwr', center=0, ax=axes_night[1])
# ax_std.set_title('Night std - All std in % from All std')
# labels_mean = [x for x in ax_mean.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_mean]
# labels_std = [x for x in ax_std.yaxis.get_ticklabels()]
# [label.set_bbox(color_dict[label.get_text()]) for label in labels_std]
# [ax.set_xlabel('month') for ax in axes_night]
# fig_night.tight_layout()
return fig
def plot_pw_geographical_segments(df, scope='diurnal', kind=None, fg=None,
marker='o', color='b', ylim=[-2, 3],
hue=None, fontsize=14, labelsize=10,
ticklabelcolor=None,
zorder=0, label=None, save=False, bins=None):
import xarray as xr
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
from matplotlib.ticker import MultipleLocator
from PW_stations import produce_geo_gnss_solved_stations
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import FormatStrFormatter
import seaborn as sns
scope_dict = {'diurnal': {'xticks': np.arange(0, 23, 3),
'xlabel': 'Hour of day [UTC]',
'ylabel': 'PWV anomalies [mm]',
'colwrap': 3},
'annual': {'xticks': np.arange(1, 13),
'xlabel': 'month',
'ylabel': 'PWV [mm]',
'colwrap': 3}
}
sns.set_style('whitegrid')
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
color_dict = produce_colors_for_pwv_station(scope=scope, zebra=False, as_dict=True)
geo = produce_geo_gnss_solved_stations(plot=False)
sites = group_sites_to_xarray(upper=False, scope=scope)
# if scope == 'annual':
# sites = sites.T
sites_flat = [x for x in sites.values.flatten() if isinstance(x, str)]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
if fg is None:
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=scope_dict[scope]['colwrap'],
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
if not isinstance(site, str):
ax.set_axis_off()
continue
else:
if kind is None:
df[site].plot(ax=ax, marker=marker, color=color,
zorder=zorder, label=label)
ax.xaxis.set_ticks(scope_dict[scope]['xticks'])
ax.grid(True, which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'violin':
if not 'month' in df.columns:
df['month'] = df.index.month
pal = sns.color_palette("Paired", 12)
sns.violinplot(ax=ax, data=df, x='month', y=df[site],
hue=hue,
fliersize=4, gridsize=250, inner='quartile',
scale='area')
ax.set_ylabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'violin+swarm':
if not 'month' in df.columns:
df['month'] = df.index.month
pal = sns.color_palette("Paired", 12)
pal = sns.color_palette("tab20")
sns.violinplot(ax=ax, data=df, x='month', y=df[site],
hue=None, color=color_dict.get(site), fliersize=4, gridsize=250, inner=None,
scale='width')
sns.swarmplot(ax=ax, data=df, x='month', y=df[site],
color="k", edgecolor="gray",
size=2.8)
ax.set_ylabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'mean_month':
if not 'month' in df.columns:
df['month'] = df.index.month
df_mean = df.groupby('month').mean()
df_mean[site].plot(ax=ax, color=color, marker='o', markersize=10, markerfacecolor="None")
ax.set_ylabel('')
ax.xaxis.set_ticks(scope_dict[scope]['xticks'])
ax.set_xlabel('')
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.grid(True, axis='y', which='major')
ax.grid(True, axis='y', which='minor', linestyle='--')
elif kind == 'hist':
if bins is None:
bins = 15
sns.histplot(ax=ax, data=df[site].dropna(),
line_kws={'linewidth': 3}, stat='density', kde=True, bins=bins)
ax.set_xlabel('PWV [mm]', fontsize=fontsize)
ax.grid(True)
ax.set_ylabel('')
xmean = df[site].mean()
xmedian = df[site].median()
std = df[site].std()
sk = skew(df[site].dropna().values)
kurt = kurtosis(df[site].dropna().values)
# xmode = df[y].mode().median()
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean,
color='r', linestyle='--', linewidth=3)
ax.vlines(x=xmedian, ymin=0, ymax=ymed,
color='g', linestyle='-', linewidth=3)
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
ax.legend(['Mean: {:.1f}'.format(
xmean), 'Median: {:.1f}'.format(xmedian)], fontsize=fontsize)
# ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(std, sk, kurt),transform=ax.transAxes, fontsize=fontsize)
ax.tick_params(axis='x', which='major', labelsize=labelsize)
if kind != 'hist':
ax.set_xlabel(scope_dict[scope]['xlabel'], fontsize=16)
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=labelsize)
# set minor y tick labels:
# ax.yaxis.set_minor_formatter(FormatStrFormatter("%.2f"))
# ax.tick_params(axis='y', which='minor', labelsize=labelsize-8)
ax.yaxis.tick_left()
if j == 0:
if kind != 'hist':
ax.set_ylabel(scope_dict[scope]['ylabel'], fontsize=16)
else:
ax.set_ylabel('Frequency', fontsize=16)
# elif j == 1:
# if i>5:
# ax.set_ylabel(scope_dict[scope]['ylabel'], fontsize=12)
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
ax.text(.17, .87, site_label, fontsize=fontsize,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if ticklabelcolor is not None:
ax.tick_params(axis='y', labelcolor=ticklabelcolor)
# ax.yaxis.grid(
# True,
# which='minor',
# linestyle='--',
# linewidth=1,
# alpha=0.7)
# ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
if ylim is not None:
ax.set_ylim(*ylim)
# except KeyError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 0]):
# try:
# df[gr1].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 1]):
# try:
# df[gr2].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
# for i, ax in enumerate(fg.axes[:, 2]):
# try:
# df[gr3].iloc[:, i].plot(ax=ax)
# except IndexError:
# ax.set_axis_off()
fg.fig.tight_layout()
fg.fig.subplots_adjust()
if save:
filename = 'pw_{}_means_{}.png'.format(scope, kind)
plt.savefig(savefig_path / filename, orientation='portrait')
# plt.savefig(savefig_path / filename, orientation='landscape')
return fg
def plot_PWV_comparison_GNSS_radiosonde(path=work_yuval, sound_path=sound_path,
save=True, fontsize=16):
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.patches as mpatches
import matplotlib
matplotlib.rcParams['lines.markeredgewidth'] = 1
sns.set_style('whitegrid')
sns.set_style('ticks')
pal = sns.color_palette("tab10", 2)
# load radiosonde:
radio = xr.load_dataarray(sound_path / 'bet_dagan_2s_sounding_PWV_2014-2019.nc')
radio = radio.rename({'sound_time': 'time'})
radio = radio.resample(time='MS').mean()
radio.name = 'radio'
dfr = radio.to_dataframe()
dfr['month'] = dfr.index.month
# load tela:
tela = xr.load_dataset(path / 'GNSS_PW_monthly_thresh_50.nc')['tela']
dfm = tela.to_dataframe(name='tela-pwv')
dfm = dfm.loc[dfr.index]
dfm['month'] = dfm.index.month
dff = pd.concat([dfm, dfr], keys=['GNSS-TELA', 'Radiosonde'])
dff['source'] = dff.index.get_level_values(0)
# dff['month'] = dfm.index.month
dff = dff.reset_index()
dff['pwv'] = dff['tela-pwv'].fillna(0)+dff['radio'].fillna(0)
dff = dff[dff['pwv'] != 0]
fig = plt.figure(figsize=(20, 6))
sns.set_style("whitegrid")
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
grid = plt.GridSpec(
1, 2, width_ratios=[
2, 1], wspace=0.1, hspace=0)
ax_ts = fig.add_subplot(grid[0]) # plt.subplot(221)
ax_v = fig.add_subplot(grid[1])
# fig, axes = plt.subplots(1, 2, figsize=(20, 6))
ax_v = sns.violinplot(data=dff, x='month', y='pwv',
fliersize=10, gridsize=250, ax=ax_v,
inner=None, scale='width', palette=pal,
hue='source', split=True, zorder=20)
[x.set_alpha(0.5) for x in ax_v.collections]
ax_v = sns.pointplot(x='month', y='pwv', data=dff, estimator=np.mean,
dodge=True, ax=ax_v, hue='source', color=None,
linestyles='-', markers=['s', 'o'], scale=0.7,
ci=None, alpha=0.5, zorder=0, style='source',edgecolor='k', edgewidth=0.4)
ax_v.get_legend().set_title('')
p1 = (mpatches.Patch(facecolor=pal[0], edgecolor='k', alpha=0.5))
p2 = (mpatches.Patch(facecolor=pal[1], edgecolor='k', alpha=0.5))
handles = [p1, p2]
ax_v.legend(handles=handles, labels=['GNSS-TELA', 'Radiosonde'],
loc='upper left', prop={'size': fontsize-2})
# ax_v.legend(loc='upper left', prop={'size': fontsize-2})
ax_v.tick_params(labelsize=fontsize)
ax_v.set_ylabel('')
ax_v.grid(True, axis='both')
ax_v.set_xlabel('month', fontsize=fontsize)
df = dfm['tela-pwv'].to_frame()
df.columns = ['GNSS-TELA']
df['Radiosonde'] = dfr['radio']
cmap = sns.color_palette("tab10", as_cmap=True)
df.plot(ax=ax_ts, style=['s-', 'o-'], cmap=cmap)
# df['GNSS-TELA'].plot(ax=ax_ts, style='s-', cmap=cmap)
# df['Radiosonde'].plot(ax=ax_ts, style='o-', cmap=cmap)
ax_ts.grid(True, axis='both')
ylim = ax_v.get_ylim()
ax_ts.set_ylim(*ylim)
ax_ts.set_ylabel('PWV [mm]', fontsize=fontsize)
ax_ts.set_xlabel('')
ax_ts.legend(loc='upper left', prop={'size': fontsize-2})
ax_ts.tick_params(labelsize=fontsize)
fig.tight_layout()
if save:
filename = 'pwv_radio_comparison_violin+ts.png'
plt.savefig(savefig_path / filename, orientation='landscape',bbox_inches='tight')
return fig
def prepare_diurnal_variability_table(path=work_yuval, rename_cols=True):
from PW_stations import calculate_diurnal_variability
df = calculate_diurnal_variability()
gr = group_sites_to_xarray(scope='diurnal')
gr_df = gr.to_dataframe('sites')
new = gr.T.values.ravel()
geo = [gr_df[gr_df == x].dropna().index.values.item()[1] for x in new]
geo = [x.title() for x in geo]
df = df.reindex(new)
if rename_cols:
df.columns = ['Annual [%]', 'JJA [%]', 'SON [%]', 'DJF [%]', 'MAM [%]']
cols = [x for x in df.columns]
df['Location'] = geo
cols = ['Location'] + cols
df = df[cols]
df.index = df.index.str.upper()
print(df.to_latex())
print('')
print(df.groupby('Location').mean().to_latex())
return df
def prepare_harmonics_table(path=work_yuval, season='ALL',
scope='diurnal', era5=False, add_third=False):
import xarray as xr
from aux_gps import run_MLR_harmonics
import pandas as pd
import numpy as np
from calendar import month_abbr
if scope == 'diurnal':
cunits = 'cpd'
grp = 'hour'
grp_slice = [0, 12]
tunits = 'UTC'
elif scope == 'annual':
cunits = 'cpy'
grp = 'month'
grp_slice = [7, 12]
tunits = 'month'
if era5:
ds = xr.load_dataset(work_yuval / 'GNSS_PW_ERA5_harmonics_annual.nc')
else:
ds = xr.load_dataset(work_yuval / 'GNSS_PW_harmonics_{}.nc'.format(scope))
stations = list(set([x.split('_')[0] for x in ds]))
records = []
for station in stations:
if season in ds.dims:
diu_ph = ds[station + '_mean'].sel({season: season, cunits: 1}).idxmax()
diu_amp = ds[station + '_mean'].sel({season: season, cunits: 1}).max()
semidiu_ph = ds[station +
'_mean'].sel({season: season, cunits: 2, grp: slice(*grp_slice)}).idxmax()
semidiu_amp = ds[station +
'_mean'].sel({season: season, cunits: 2, grp: slice(*grp_slice)}).max()
else:
diu_ph = ds[station + '_mean'].sel({cunits: 1}).idxmax()
diu_amp = ds[station + '_mean'].sel({cunits: 1}).max()
semidiu_ph = ds[station +
'_mean'].sel({cunits: 2, grp: slice(*grp_slice)}).idxmax()
semidiu_amp = ds[station +
'_mean'].sel({cunits: 2, grp: slice(*grp_slice)}).max()
if add_third:
third_ph = ds[station +
'_mean'].sel({cunits: 3, grp: slice(*grp_slice)}).idxmax()
third_amp = ds[station +
'_mean'].sel({cunits: 3, grp: slice(*grp_slice)}).max()
ds_for_MLR = ds[['{}'.format(station), '{}_mean'.format(station)]]
if add_third:
harm_di = run_MLR_harmonics(
ds_for_MLR, season=season, cunits=cunits, plot=False)
record = [station, diu_amp.item(), diu_ph.item(), harm_di[1],
semidiu_amp.item(), semidiu_ph.item(), harm_di[2],
third_amp.item(), third_ph.item(), harm_di[3],
harm_di[1] + harm_di[2] + harm_di[3]]
else:
harm_di = run_MLR_harmonics(
ds_for_MLR, season=season, cunits=cunits, plot=False)
record = [station, diu_amp.item(), diu_ph.item(), harm_di[1],
semidiu_amp.item(), semidiu_ph.item(), harm_di[2],
harm_di[1] + harm_di[2]]
records.append(record)
df = | pd.DataFrame(records) | pandas.DataFrame |
"""
A warehouse for constant values required to initilize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import importlib.resources
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
    'PE': 'Prince Edward Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, the timezone of the more populous region was used.
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
# Construct a dictionary mapping a canonical fuel name to a list of strings
# which are used to represent that fuel in the FERC Form 1 Reporting. Case is
# ignored, as all fuel strings can be converted to a lower case in the data
# set.
# Previous categories of ferc1_biomass_strings and ferc1_stream_strings have
# been deleted and their contents redistributed to ferc1_waste_strings and
# ferc1_other_strings
ferc1_coal_strings = [
'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite',
'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub',
'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil',
'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons',
'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons'
]
"""
list: A list of strings which are used to represent coal fuel in FERC Form 1
reporting.
"""
ferc1_oil_strings = [
'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil',
'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil',
'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas',
'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls',
'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil',
'#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil',
'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6',
'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil',
'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil',
'#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil',
'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel',
'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.',
'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil',
'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2',
'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial'
]
"""
list: A list of strings which are used to represent oil fuel in FERC Form 1
reporting.
"""
ferc1_gas_strings = [
'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf',
'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas',
'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf',
'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas',
'n. gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**',
'* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas',
'coke oven gas'
]
"""
list: A list of strings which are used to represent gas fuel in FERC Form 1
reporting.
"""
# No solar, wind, or hydro fuel strings have been categorized from the FERC
# Form 1 data yet, so these lists are empty placeholders:
ferc1_solar_strings = []
ferc1_wind_strings = []
ferc1_hydro_strings = []
ferc1_nuke_strings = [
'nuclear', 'grams of uran', 'grams of', 'grams of ura',
'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar',
'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc',
'gr. uranium', 'nuclear mw da', 'grams of ura'
]
"""
list: A list of strings which are used to represent nuclear fuel in FERC Form
1 reporting.
"""
ferc1_waste_strings = [
'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips',
'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse',
'waste oil', 'waste', 'woodships', 'tire chips'
]
"""
list: A list of strings which are used to represent waste fuel in FERC Form 1
reporting.
"""
ferc1_other_strings = [
'steam', 'purch steam', 'all', 'tdf', 'n/a', 'purch. steam', 'other',
'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo',
'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?',
'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average',
'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined',
'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c',
'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to',
'the right are', 'c omposite', 'all fuels are', 'total pr crk',
'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel',
'total prairie', ''
]
"""list: A list of strings which are used to represent other fuels in FERC Form
1 reporting.
"""
# There are also a bunch of other weird and hard to categorize strings
# that I don't know what to do with... hopefully they constitute only a
# small fraction of the overall generation.
ferc1_fuel_strings = {"coal": ferc1_coal_strings,
"oil": ferc1_oil_strings,
"gas": ferc1_gas_strings,
"solar": ferc1_solar_strings,
"wind": ferc1_wind_strings,
"hydro": ferc1_hydro_strings,
"nuclear": ferc1_nuke_strings,
"waste": ferc1_waste_strings,
"other": ferc1_other_strings
}
"""dict: A dictionary linking fuel types (keys) to lists of various strings
representing that fuel (values)
"""
# Similarly, dictionary for cleaning up fuel unit strings
ferc1_ton_strings = ['toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal',
'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal',
'coal-ton', 'tires-tons', 'coal tons -2 ',
'coal tons 200', 'ton-2000', 'coal tons -2', 'coal tons',
'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv']
"""list: A list of fuel unit strings for tons."""
ferc1_mcf_strings = \
['mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf',
'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg',
'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..']
"""list: A list of fuel unit strings for thousand cubic feet."""
ferc1_bbl_strings = \
['barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.',
'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal',
'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel',
'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb',
'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl',
'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel',
'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%',
'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll',
'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel',
'"boiler"barre', '"boiler barre', 'barrels .']
"""list: A list of fuel unit strings for barrels."""
ferc1_gal_strings = ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal',
'galllons']
"""list: A list of fuel unit strings for gallons."""
ferc1_1kgal_strings = ['oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000',
'oil(1000ga)']
"""list: A list of fuel unit strings for thousand gallons."""
ferc1_gramsU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran',
'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235',
'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium',
'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235',
'se uo2 grams'
]
"""list: A list of fuel unit strings for grams."""
ferc1_kgU_strings = [ # noqa: N816 (U-ranium is capitalized...)
'kg of uranium', 'kg uranium', 'kilg. u-235', 'kg u-235', 'kilograms-u23',
'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams',
'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235',
'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235'
]
"""list: A list of fuel unit strings for thousand grams."""
ferc1_mmbtu_strings = ['mmbtu', 'mmbtus', 'mbtus', '(mmbtu)',
"mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt']
"""list: A list of fuel unit strings for million British Thermal Units."""
ferc1_mwdth_strings = \
['mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal',
'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal',
     'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml',
     'mw days/therm', 'mw days (th', 'ermal)']
"""list: A list of fuel unit strings for megawatt days thermal."""
ferc1_mwhth_strings = ['mwh them', 'mwh threm', 'nwh therm', 'mwhth',
'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts',
'mwh thermal', 'mwh thermals', 'mw hr therm',
'mwh therma', 'mwh therm.uts']
"""list: A list of fuel unit strings for megawatt hours thermal."""
ferc1_fuel_unit_strings = {'ton': ferc1_ton_strings,
'mcf': ferc1_mcf_strings,
'bbl': ferc1_bbl_strings,
'gal': ferc1_gal_strings,
'1kgal': ferc1_1kgal_strings,
'gramsU': ferc1_gramsU_strings,
'kgU': ferc1_kgU_strings,
'mmbtu': ferc1_mmbtu_strings,
'mwdth': ferc1_mwdth_strings,
'mwhth': ferc1_mwhth_strings
}
"""
dict: A dictionary linking fuel units (keys) to lists of various strings
representing those fuel units (values)
"""
# Categorizing the strings from the FERC Form 1 Plant Kind (plant_kind) field
# into lists. There are many strings that weren't categorized,
# Solar and Solar Project were not classified as these do not indicate if they
# are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and
# "steam and gas") were classified based on additional research of the plants
# on the Internet.
ferc1_plant_kind_steam_turbine = [
'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5',
'steam fossil', 'steam turbine', 'steam a', 'steam 100',
'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream',
'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6',
'steam conventional', 'unit total-steam', 'unit total steam',
'*resp. share steam', 'resp. share steam', 'steam (see note 1,',
'steam (see note 3)', 'mpc 50%share steam', '40% share steam'
'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)',
'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4',
'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam',
'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean',
'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6',
    'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam',
'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3',
'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)',
'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5',
'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)',
'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.",
"respondent's sh-st", '40% share steam', 'resp share stm note3',
'mpc50% share steam', 'resp share st note 3', '\x02steam (1)',
]
"""
list: A list of strings from FERC Form 1 for the steam turbine plant kind.
"""
ferc1_plant_kind_combustion_turbine = [
'combustion turbine', 'gt', 'gas turbine',
'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)',
'gas turbines', 'simple cycle', 'combustion turbine',
'comb.turb.peak.units', 'gas turbine', 'combustion turbine',
'com turbine peaking', 'gas turbine peaking', 'comb turb peaking',
'combustine turbine', 'comb. turine', 'conbustion turbine',
'combustine turbine', 'gas turbine (leased)', 'combustion tubine',
'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine',
'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1',
'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine',
'gas turbine (2)', 'comb turb peak units', 'jet engine',
'jet powered turbine', '*gas turbine', 'gas turb.(see note5)',
'gas turb. (see note', 'combutsion turbine', 'combustion turbin',
'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking',
'gas expander turbine', 'jet turbine', 'gas turbin (lease',
'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.',
'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)',
'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper',
'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb',
'gas turbine (note1)', 'combution turbin', '* gas turbine',
'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb',
'gas turbine (note 3)', 'resp share gas note3', 'gas trubine',
'*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6',
'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)',
'comb. turb-gas oper.', 'combution turbine', 'combusion turbine',
'comb. turb. oil oper', 'combustion burbine', 'combustion and gas',
'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)',
'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos',
'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine',
'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb',
'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine',
'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine',
]
"""list: A list of strings from FERC Form 1 for the combustion turbine plant
kind.
"""
ferc1_plant_kind_combined_cycle = [
'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine',
'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle',
'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%',
'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc',
'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy',
'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas',
    'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)',
'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec',
'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl',
'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.',
'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc',
'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc',
'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine',
'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb',
'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam',
'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle',
'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas',
'ctg/steam -gas'
]
"""
list: A list of strings from FERC Form 1 for the combined cycle plant kind.
"""
ferc1_plant_kind_nuke = [
    'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)',
'nuclear steam', 'nuclear turbine', 'nuclear - steam',
'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)',
'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)',
'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)',
'nuclear steam', 'nuclear(see notes)', 'nuclear-steam',
'nuclear (see note 3)'
]
"""list: A list of strings from FERC Form 1 for the nuclear plant kind."""
ferc1_plant_kind_geothermal = [
'steam - geothermal', 'steam_geothermal', 'geothermal'
]
"""list: A list of strings from FERC Form 1 for the geothermal plant kind."""
ferc_1_plant_kind_internal_combustion = [
    'ic', 'internal combustion', 'internal comb.', 'internl combustion',
'diesel turbine', 'int combust (note 1)', 'int. combust (note1)',
'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine',
'internal combustion', 'int combust - note 1', 'int. combust - note1',
'internal comb recip', 'reciprocating engine', 'comb. turbine',
'internal combust.', 'int. combustion (1)', '*int combustion (1)',
"*internal combust'n", 'internal', 'internal comb.', 'steam internal comb',
'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine',
'internl combustion', '*int. combustion (1)'
]
"""
list: A list of strings from FERC Form 1 for the internal combustion plant
kind.
"""
ferc1_plant_kind_wind = [
'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation'
]
"""list: A list of strings from FERC Form 1 for the wind plant kind."""
ferc1_plant_kind_photovoltaic = [
'solar photovoltaic', 'photovoltaic', 'solar', 'solar project'
]
"""list: A list of strings from FERC Form 1 for the photovoltaic plant kind."""
ferc1_plant_kind_solar_thermal = ['solar thermal']
"""
list: A list of strings from FERC Form 1 for the solar thermal plant kind.
"""
# Making a dictionary of lists from the lists of plant_fuel strings to create
# a dictionary of plant fuel lists.
ferc1_plant_kind_strings = {
'steam': ferc1_plant_kind_steam_turbine,
'combustion_turbine': ferc1_plant_kind_combustion_turbine,
'combined_cycle': ferc1_plant_kind_combined_cycle,
'nuclear': ferc1_plant_kind_nuke,
'geothermal': ferc1_plant_kind_geothermal,
'internal_combustion': ferc_1_plant_kind_internal_combustion,
'wind': ferc1_plant_kind_wind,
'photovoltaic': ferc1_plant_kind_photovoltaic,
'solar_thermal': ferc1_plant_kind_solar_thermal
}
"""
dict: A dictionary of plant kinds (keys) and associated lists of plant_fuel
strings (values).
"""
# This is an alternative set of strings for simplifying the plant kind field
# from Uday & Laura at CPI. For the moment we have reverted to using our own
# categorizations which are more detailed, but these are preserved here for
# comparison and testing, if need be.
cpi_diesel_strings = ['DIESEL', 'Diesel Engine', 'Diesel Turbine', ]
"""
list: A list of strings for fuel type diesel compiled by Climate Policy
Initiative.
"""
cpi_geothermal_strings = ['Steam - Geothermal', ]
"""
list: A list of strings for fuel type geothermal compiled by Climate Policy
Initiative.
"""
cpi_natural_gas_strings = [
'Combined Cycle', 'Combustion Turbine', 'GT',
'GAS TURBINE', 'Comb. Turbine', 'Gas Turbine #1', 'Combine Cycle Oper',
'Combustion', 'Combined', 'Gas Turbine/Steam', 'Gas Turbine Peaker',
'Gas Turbine - Note 1', 'Resp Share Gas Note3', 'Gas Turbines',
'Simple Cycle', 'Gas / Steam', 'GasTurbine', 'Combine Cycle',
'CTG/Steam-Gas', 'GTG/Gas', 'CTG/Steam -Gas', 'Steam/Gas Turbine',
'CombustionTurbine', 'Gas Turbine-Simple', 'STEAM & GAS TURBINE',
'Gas & Steam Turbine', 'Gas', 'Gas Turbine (2)', 'COMBUSTION AND GAS',
'Com Turbine Peaking', 'Gas Turbine Peaking', 'Comb Turb Peaking',
'JET ENGINE', 'Comb. Cyc', 'Com. Cyc', 'Com. Cycle',
'GAS TURB-COMBINED CY', 'Gas Turb', 'Combined Cycle - 40%',
'IGCC/Gas Turbine', 'CC', 'Combined Cycle Oper', 'Simple Cycle Turbine',
'Steam and CC', 'Com Cycle Gas Turb', 'I.C.E/ Gas Turbine',
'Combined Cycle CTG', 'GAS-TURBINE', 'Gas Expander Turbine',
'Gas Turbine (Leased)', 'Gas Turbine # 1', 'Gas Turbine (Note 1)',
'COMBUSTINE TURBINE', 'Gas Turb, Int. Comb.', 'Combined Turbine',
'Comb Turb Peak Units', 'Combustion Tubine', 'Comb. Cycle',
'COMB.TURB.PEAK.UNITS', 'Steam and CC', 'I.C.E. /Gas Turbine',
'Conbustion Turbine', 'Gas Turbine/Int Comb', 'Steam & CC',
'GAS TURB. & HEAT REC', 'Gas Turb/Comb. Cyc', 'Comb. Turine',
]
"""list: A list of strings for fuel type gas compiled by Climate Policy
Initiative.
"""
cpi_nuclear_strings = ['Nuclear', 'Nuclear (3)', ]
"""list: A list of strings for fuel type nuclear compiled by Climate Policy
Initiative.
"""
cpi_other_strings = [
'IC', 'Internal Combustion', 'Int Combust - Note 1',
'Resp. Share - Note 2', 'Int. Combust - Note1', 'Resp. Share - Note 4',
'Resp Share - Note 5', 'Resp. Share - Note 7', 'Internal Comb Recip',
'Reciprocating Engine', 'Internal Comb', 'Resp. Share - Note 8',
'Resp. Share - Note 9', 'Resp Share - Note 11', 'Resp. Share - Note 6',
'INT.COMBUSTINE', 'Steam (Incl I.C.)', 'Other', 'Int Combust (Note 1)',
'Resp. Share (Note 2)', 'Int. Combust (Note1)', 'Resp. Share (Note 8)',
'Resp. Share (Note 9)', 'Resp Share (Note 11)', 'Resp. Share (Note 4)',
'Resp. Share (Note 6)', 'Plant retired- 2013', 'Retired - 2013',
]
"""list: A list of strings for fuel type other compiled by Climate Policy
Initiative.
"""
cpi_steam_strings = [
'Steam', 'Steam Units 1, 2, 3', 'Resp Share St Note 3',
'Steam Turbine', 'Steam-Internal Comb', 'IGCC', 'Steam- 72%', 'Steam (1)',
'Steam (1)', 'Steam Units 1,2,3', 'Steam/Fossil', 'Steams', 'Steam - 72%',
'Steam - 100%', 'Stream', 'Steam Units 4, 5', 'Steam - 64%', 'Common',
'Steam (A)', 'Coal', 'Steam;Retired - 2013', 'Steam Units 4 & 6',
]
"""list: A list of strings for fuel type steam compiled by Climate Policy
Initiative.
"""
cpi_wind_strings = ['Wind', 'Wind Turbine', 'Wind - Turbine', 'Wind Energy', ]
"""list: A list of strings for fuel type wind compiled by Climate Policy
Initiative.
"""
cpi_solar_strings = [
'Solar Photovoltaic', 'Solar Thermal', 'SOLAR PROJECT', 'Solar',
'Photovoltaic',
]
"""list: A list of strings for fuel type photovoltaic compiled by Climate
Policy Initiative.
"""
cpi_plant_kind_strings = {
'natural_gas': cpi_natural_gas_strings,
'diesel': cpi_diesel_strings,
'geothermal': cpi_geothermal_strings,
'nuclear': cpi_nuclear_strings,
'steam': cpi_steam_strings,
'wind': cpi_wind_strings,
'solar': cpi_solar_strings,
'other': cpi_other_strings,
}
"""dict: A dictionary linking fuel types (keys) to lists of strings associated
by Climate Policy Institute with those fuel types (values).
"""
# Categorizing the strings from the FERC Form 1 Type of Plant Construction
# (construction_type) field into lists.
# There are many strings that weren't categorized, including crosses between
# conventional and outdoor, PV, wind, combined cycle, and internal combustion.
# The lists are broken out into the two types specified in Form 1:
# conventional and outdoor. These lists are inclusive so that variants of
# conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full"
# and "outdoor hrsg") are included.
ferc1_const_type_outdoor = [
'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler',
'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor',
'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full',
'outdoor boiler& full', 'full -outdoor', 'outdoor steam',
'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower',
'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor',
'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full',
'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful',
'outdoor-boiler', 'outdoor - boiler', 'outdoor const.',
'4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors',
'full oudoors', 'outdoor (auto oper)', 'outside boiler',
'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg',
'outdoor-steel encl.', 'boiler-outdr & full',
'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)',
'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler',
'2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers',
'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor',
'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor',
'outdore', 'boiler & full outdor', 'full & outdr boilers',
'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor',
'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.',
'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr',
]
"""list: A list of strings from FERC Form 1 associated with the outdoor
construction type.
"""
ferc1_const_type_semioutdoor = [
'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor',
'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor',
'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler',
    'semi- outdoor', 'semi - outdoors', 'semi -outdoor',
'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor',
'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)',
'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor',
'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2',
'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor',
'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo',
'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor',
'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr',
'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler',
'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.',
'2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor',
'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor',
'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.',
'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor',
'1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr',
'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor',
'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.',
'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv',
'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler',
'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr',
'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.',
'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv',
'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor',
'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr',
'conv & full outdoor', 'convntl. outdoor blr', 'conv - ob',
"1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob',
'1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob',
'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo',
'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers',
'2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor',
'conven. blr. & full', 'conv. & otdr. blr.', 'combination',
'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler",
'4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler',
'indoor & outdoof',
]
"""list: A list of strings from FERC Form 1 associated with the semi - outdoor
construction type, or a mix of conventional and outdoor construction.
"""
ferc1_const_type_conventional = [
'conventional', 'conventional', 'conventional boiler', 'conv-b',
'conventionall', 'convention', 'conventional', 'coventional',
    'conven full boiler', 'c0nventional', 'conventtional', 'convential',
'underground', 'conventional bulb', 'conventrional',
'*conventional', 'convential', 'convetional', 'conventioanl',
'conventioinal', 'conventaional', 'indoor construction', 'convenional',
'conventional steam', 'conventinal', 'convntional', 'conventionl',
'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.',
'full indoor', 'indoor', 'indoor automatic', 'indoor boiler',
'(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor',
'conventional, indoor', 'comb. cycle indoor', '3 indoor boiler',
'2 indoor boilers', '1 indoor boiler', '2 indoor boiler',
'3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler',
'cnventional', 'comb. cycle indooor', 'sonventional',
]
"""list: A list of strings from FERC Form 1 associated with the conventional
construction type.
"""
# Making a dictionary of lists from the lists of construction_type strings to
# create a dictionary of construction type lists.
ferc1_const_type_strings = {
'outdoor': ferc1_const_type_outdoor,
'semioutdoor': ferc1_const_type_semioutdoor,
'conventional': ferc1_const_type_conventional,
}
"""dict: A dictionary of construction types (keys) and lists of construction
type strings associated with each type (values) from FERC Form 1.
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files(w / o .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types are left as 'XXX', which will intentionally raise an error.
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
ferc714_pudl_tables = (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
)
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data.
"""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
# patterns for matching columns to months:
month_dict_eia923 = {1: '_january$',
2: '_february$',
3: '_march$',
4: '_april$',
5: '_may$',
6: '_june$',
7: '_july$',
8: '_august$',
9: '_september$',
10: '_october$',
11: '_november$',
12: '_december$'}
"""dict: A dictionary mapping column numbers (keys) to months (values).
"""
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple containing the list of EIA 860 tables that can be
successfully pulled into PUDL.
"""
eia861_pudl_tables = (
"service_territory_eia861",
)
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form 923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
    # are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
    # connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
    'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, or barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
    'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
    'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OC': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# We need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters: we need to harvest the
# plant locations before harvesting the utility locations, for example.
entities = {
'plants': [ # base cols
['plant_id_eia'],
# static cols
['balancing_authority_code', 'balancing_authority_name',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude',
'nerc_region', 'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'net_metering', 'pipeline_notes',
'regulatory_status_code', 'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
# {'plant_id_eia': 'int64',
# 'grid_voltage_2_kv': 'float64',
# 'grid_voltage_3_kv': 'float64',
# 'grid_voltage_kv': 'float64',
# 'longitude': 'float64',
# 'latitude': 'float64',
# 'primary_purpose_naics_id': 'float64',
# 'sector_id': 'float64',
# 'zip_code': 'float64',
# 'utility_id_eia': 'float64'},
],
'generators': [ # base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'deliver_power_transgrid', 'summer_capacity_mw',
'winter_capacity_mw', 'minimum_load_mw', 'technology_description',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date', 'utility_id_eia'],
# need type fixing
{}
# {'plant_id_eia': 'int64',
# 'generator_id': 'str'},
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [ # base cols
['utility_id_eia'],
# static cols
['utility_name_eia',
'entity_type'],
# annual cols
['street_address', 'city', 'state', 'zip_code',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [ # base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{}, ]}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
# EPA CEMS constants #####
epacems_rename_dict = {
"STATE": "state",
# "FACILITY_NAME": "plant_name", # Not reading from CSV
"ORISPL_CODE": "plant_id_eia",
"UNITID": "unitid",
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": "op_date",
"OP_HOUR": "op_hour",
"OP_TIME": "operating_time_hours",
"GLOAD (MW)": "gross_load_mw",
"GLOAD": "gross_load_mw",
"SLOAD (1000 lbs)": "steam_load_1000_lbs",
"SLOAD (1000lb/hr)": "steam_load_1000_lbs",
"SLOAD": "steam_load_1000_lbs",
"SO2_MASS (lbs)": "so2_mass_lbs",
"SO2_MASS": "so2_mass_lbs",
"SO2_MASS_MEASURE_FLG": "so2_mass_measurement_code",
# "SO2_RATE (lbs/mmBtu)": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE": "so2_rate_lbs_mmbtu", # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": "so2_rate_measure_flg", # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": "nox_rate_lbs_mmbtu",
"NOX_RATE": "nox_rate_lbs_mmbtu",
"NOX_RATE_MEASURE_FLG": "nox_rate_measurement_code",
"NOX_MASS (lbs)": "nox_mass_lbs",
"NOX_MASS": "nox_mass_lbs",
"NOX_MASS_MEASURE_FLG": "nox_mass_measurement_code",
"CO2_MASS (tons)": "co2_mass_tons",
"CO2_MASS": "co2_mass_tons",
"CO2_MASS_MEASURE_FLG": "co2_mass_measurement_code",
# "CO2_RATE (tons/mmBtu)": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE": "co2_rate_tons_mmbtu", # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": "co2_rate_measure_flg", # Not reading from CSV
"HEAT_INPUT (mmBtu)": "heat_content_mmbtu",
"HEAT_INPUT": "heat_content_mmbtu",
"FAC_ID": "facility_id",
"UNIT_ID": "unit_id_epa",
}
"""dict: A dictionary containing EPA CEMS column names (keys) and replacement
names to use when reading those columns into PUDL (values).
"""
# Any column that exactly matches one of these won't be read
epacems_columns_to_ignore = {
"FACILITY_NAME",
"SO2_RATE (lbs/mmBtu)",
"SO2_RATE",
"SO2_RATE_MEASURE_FLG",
"CO2_RATE (tons/mmBtu)",
"CO2_RATE",
"CO2_RATE_MEASURE_FLG",
}
"""set: The set of EPA CEMS columns to ignore when reading data.
"""
# Specify dtypes to for reading the CEMS CSVs
epacems_csv_dtypes = {
"STATE": pd.StringDtype(),
# "FACILITY_NAME": str, # Not reading from CSV
"ORISPL_CODE": pd.Int64Dtype(),
"UNITID": pd.StringDtype(),
# These op_date, op_hour, and op_time variables get converted to
# operating_date, operating_datetime and operating_time_interval in
# transform/epacems.py
"OP_DATE": pd.StringDtype(),
"OP_HOUR": pd.Int64Dtype(),
"OP_TIME": float,
"GLOAD (MW)": float,
"GLOAD": float,
"SLOAD (1000 lbs)": float,
"SLOAD (1000lb/hr)": float,
"SLOAD": float,
"SO2_MASS (lbs)": float,
"SO2_MASS": float,
"SO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "SO2_RATE (lbs/mmBtu)": float, # Not reading from CSV
# "SO2_RATE": float, # Not reading from CSV
# "SO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"NOX_RATE (lbs/mmBtu)": float,
"NOX_RATE": float,
"NOX_RATE_MEASURE_FLG": pd.StringDtype(),
"NOX_MASS (lbs)": float,
"NOX_MASS": float,
"NOX_MASS_MEASURE_FLG": pd.StringDtype(),
"CO2_MASS (tons)": float,
"CO2_MASS": float,
"CO2_MASS_MEASURE_FLG": pd.StringDtype(),
# "CO2_RATE (tons/mmBtu)": float, # Not reading from CSV
# "CO2_RATE": float, # Not reading from CSV
# "CO2_RATE_MEASURE_FLG": str, # Not reading from CSV
"HEAT_INPUT (mmBtu)": float,
"HEAT_INPUT": float,
"FAC_ID": pd.Int64Dtype(),
"UNIT_ID": pd.Int64Dtype(),
}
"""dict: A dictionary containing column names (keys) and data types (values)
for EPA CEMS.
"""
epacems_tables = ("hourly_emissions_epacems")
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
epacems_additional_plant_info_file = importlib.resources.open_text(
'pudl.package_data.epa.cems', 'plant_info_for_additional_cems_plants.csv')
"""typing.TextIO:
Todo:
Return to
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
read_excel_epaipm_dict = {
'transmission_single_epaipm': dict(
skiprows=3,
usecols='B:F',
index_col=[0, 1],
),
'transmission_joint_epaipm': {},
'load_curves_epaipm': dict(
skiprows=3,
usecols='B:AB',
),
'plant_region_map_epaipm_active': dict(
sheet_name='NEEDS v6_Active',
usecols='C,I',
),
'plant_region_map_epaipm_retired': dict(
sheet_name='NEEDS v6_Retired_Through2021',
usecols='C,I',
),
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and the
keyword arguments needed to read each table into pandas (values).
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing aggregated EPA IPM regions (keys) and lists of
the individual EPA IPM region names they contain (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple containing the glue tables that link FERC Form 1 and EIA
plants and utilities to one another.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2019)),
'eia861': tuple(range(1990, 2019)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_years = {
'eia860': tuple(range(2009, 2019)),
'eia861': tuple(range(1999, 2019)),
'eia923': tuple(range(2009, 2019)),
'epacems': tuple(range(1995, 2019)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2019)),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years for
each data source that are able to be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': eia861_pudl_tables,
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': ferc714_pudl_tables,
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing tables (keys) and tuples of column names (values)
for integer-type columns whose null values need fixing.
"""
contributors = {
"catalyst-cooperative": {
"title": "C<NAME>ooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing contributor names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing datasources (keys) and
associated attributes (values)
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
'notebook',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
'utility_id_ferc1': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_id_ferc1': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'report_year': pd.Int64Dtype(),
'report_date': 'datetime64[ns]',
},
"ferc714": { # INCOMPLETE
"report_year": pd.Int64Dtype(),
"utility_id_ferc714": pd.Int64Dtype(),
"utility_id_eia": pd.Int64Dtype(),
"utility_name_ferc714": pd.StringDtype(),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': | pd.StringDtype() | pandas.StringDtype |
import requests
import base64
import re
import os
import datetime
import random
import tensorflow as tf
import tensorflow_text
import numpy as np
import pandas as pd
import tweepy
import plotly.graph_objects as go
print("Imports done")
auth = tweepy.OAuthHandler(os.environ.get('API_KEY'), os.environ.get('API_SECRET'))
auth.set_access_token(os.environ.get('ACCESS_TOKEN'), os.environ.get('ACCESS_SECRET'))
api = tweepy.API(auth,wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
MAX_SEARCH = 200
MEDIA_LINK = 'https://unionpoll.com/wp-json/wp/v2/media/'
bert_model_path = "sentiment140_bert"
bert_model = tf.saved_model.load(bert_model_path)
print("Sentiment model loaded")
def bert_preprocess(text):
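    # Clean one raw tweet: strip @mentions and URLs, drop non-letters, lowercase and trim.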
pat1 = r'@[A-Za-z0-9]+'
pat2 = r'https?://[A-Za-z0-9./]+'
combined_pat = r'|'.join((pat1, pat2))
stripped = re.sub(combined_pat, '', text)
try:
clean = stripped.decode("utf-8-sig").replace(u"\ufffd", "?")
except:
clean = stripped
letters_only = re.sub("[^a-zA-Z]", " ", clean)
lower_case = letters_only.lower()
    # The letters_only step above may introduce extra whitespace; strip it before returning.
return lower_case.strip()
preprocess = np.vectorize(bert_preprocess)
def new_val(prob):
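    # Collapse a sigmoid probability into 1 (positive), 0 (negative) or 0.5 (neutral/uncertain).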
if prob > 0.75:
return 1
elif prob < 0.25:
return 0
else:
return 0.5
reformat = np.vectorize(new_val)
def getSentiments(queries,user_query):
"""
Input: Queries
Returns: List of sentiments
"""
thirty_earlier = datetime.datetime.utcnow()-datetime.timedelta(30)
tweets = []
indices = []
ind = 0
sentiments = []
for query in queries:
indices.append(ind)
if query is not None:
print(query)
if user_query:
statuses = tweepy.Cursor(api.user_timeline,id=query).items()
else:
statuses = tweepy.Cursor(api.search, q=query).items(MAX_SEARCH)
for status in statuses:
if status.created_at > thirty_earlier:
tweets.append(status.text)
ind += 1
else:
break
indices.append(ind)
print("Preprocessing tweets")
preprocessed = preprocess(np.array(tweets))
print("Making predictions")
predictions = reformat(tf.sigmoid(bert_model(tf.constant(preprocessed))))
for i in range(len(queries)):
if indices[i+1] == indices[i]:
sentiments.append(np.nan)
else:
sentiments.append(int(round(np.mean(predictions[indices[i]:indices[i+1]])*1000)))
return sentiments
if __name__=='__main__':
user = os.environ.get('WP_USER')
password = os.environ.get('WP_PASSWORD')
credentials = user + ':' + password
token = base64.b64encode(credentials.encode())
header = {'Authorization': 'Basic ' + token.decode('utf-8')}
response = requests.get(MEDIA_LINK)
files_total = response.json()
files_needed = ['users', 'searches']
for file in files_needed:
queries_needed = []
with open(file+'.txt', 'r') as f:
for line in f:
queries_needed.append(line.strip())
found = False
for media in files_total:
if file + '.csv' in media['source_url']:
id = media['id']
file_url = media['source_url']
found = True
break
assert found, "csv file not found"
print("Retrieving file from "+file_url)
df = | pd.read_csv(file_url) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import os
import pandas as pd
import numpy as np
import string
# from operator import itemgetter
from collections import Counter, OrderedDict
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import SnowballStemmer
from nltk.corpus import stopwords
import nltk
#nltk.download('punkt')
#nltk.download('stopwords')
from gensim.models.phrases import Phrases, Phraser
from gensim.models import Word2Vec
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
import traceback
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# First, import the wine dataset.
# In[3]:
base_location = r"wine_data"
i = 0
for file in os.listdir(base_location):
file_location = base_location + '/' + str(file)
if i==0:
wine_dataframe = pd.read_csv(file_location, encoding='latin-1')
i+=1
else:
df_to_append = pd.read_csv(file_location, encoding='latin-1', low_memory=False)
wine_dataframe = pd.concat([wine_dataframe, df_to_append], axis=0)
# In[4]:
#wine_dataframe.drop_duplicates(subset=['Name'], inplace=True)
geographies = ['Subregion', 'Region', 'Province', 'Country']
for geo in geographies:
wine_dataframe[geo] = wine_dataframe[geo].apply(lambda x : str(x).strip())
# Then, the food dataset.
# In[5]:
food_review_dataset = pd.read_csv('food_data/Reviews.csv')
print(food_review_dataset.shape)
# ### 1. Training our Word Embeddings
#
# First, we need to train a Word2Vec model on all the words in our corpus. We will process our wine and food terms separately - some of the wine terms will be standardized to account for commonalities in the colorful language of the world of wine.
# In[6]:
wine_reviews_list = list(wine_dataframe['Description'])
food_reviews_list = list(food_review_dataset['Text'])
# To begin, we need to tokenize the terms in our corpus (wine and food).
# In[7]:
full_wine_reviews_list = [str(r) for r in wine_reviews_list]
full_wine_corpus = ' '.join(full_wine_reviews_list)
wine_sentences_tokenized = sent_tokenize(full_wine_corpus)
full_food_reviews_list = [str(r) for r in food_reviews_list]
full_food_corpus = ' '.join(full_food_reviews_list)
food_sentences_tokenized = sent_tokenize(full_food_corpus)
#print(wine_sentences_tokenized[:2])
#print(food_sentences_tokenized[:2])
# Next, the text in each sentence is normalized (tokenize, remove punctuation and remove stopwords).
# In[8]:
stop_words = set(stopwords.words('english'))
punctuation_table = str.maketrans({key: None for key in string.punctuation})
sno = SnowballStemmer('english')
def normalize_text(raw_text):
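    # Tokenize a sentence, lowercase and stem each word, strip punctuation and drop stopwords.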
try:
word_list = word_tokenize(raw_text)
normalized_sentence = []
for w in word_list:
try:
w = str(w)
lower_case_word = str.lower(w)
stemmed_word = sno.stem(lower_case_word)
no_punctuation = stemmed_word.translate(punctuation_table)
if len(no_punctuation) > 1 and no_punctuation not in stop_words:
normalized_sentence.append(no_punctuation)
except:
continue
return normalized_sentence
except:
return ''
normalized_wine_sentences = []
for s in wine_sentences_tokenized:
normalized_text = normalize_text(s)
normalized_wine_sentences.append(normalized_text)
normalized_food_sentences = []
for s in food_sentences_tokenized:
normalized_text = normalize_text(s)
normalized_food_sentences.append(normalized_text)
#print(normalized_wine_sentences[:2])
#print(normalized_food_sentences[:2])
# Not all of the terms we are interested in are single words. Some of the terms are phrases, consisting of two (or more!) words. An example of this might be 'high tannin'. We can use gensim's Phrases feature to extract all the most relevant bi- and tri-grams from our corpus.
#
# We will train a separate trigram model for wine and for food.
# In[9]:
# first, take care of the wine trigrams
wine_bigram_model = Phrases(normalized_wine_sentences, min_count=100)
wine_bigrams = [wine_bigram_model[line] for line in normalized_wine_sentences]
wine_trigram_model = Phrases(wine_bigrams, min_count=50)
phrased_wine_sentences = [wine_trigram_model[line] for line in wine_bigrams]
wine_trigram_model.save('wine_trigrams.pkl')
### now, do the same for food
food_bigram_model = Phrases(normalized_food_sentences, min_count=100)
food_bigrams = [food_bigram_model[sent] for sent in normalized_food_sentences]
food_trigram_model = Phrases(food_bigrams, min_count=50)
phrased_food_sentences = [food_trigram_model[sent] for sent in food_bigrams]
food_trigram_model.save('food_trigrams.pkl')
wine_trigram_model = Phraser.load('wine_trigrams.pkl')
food_trigram_model = Phraser.load('food_trigrams.pkl')
descriptor_mapping = pd.read_csv('descriptor_mapping.csv', encoding='latin1').set_index('raw descriptor')
def return_mapped_descriptor(word, mapping):
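    # Return the normalized (level_3) descriptor for a raw term, or the term itself if unmapped.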
if word in list(mapping.index):
normalized_word = mapping.at[word, 'level_3']
return normalized_word
else:
return word
normalized_wine_sentences = []
for sent in phrased_wine_sentences:
normalized_wine_sentence = []
for word in sent:
normalized_word = return_mapped_descriptor(word, descriptor_mapping)
normalized_wine_sentence.append(str(normalized_word))
normalized_wine_sentences.append(normalized_wine_sentence)
# If the trigram model has already been trained, simply retrieve it.
# In[10]:
#wine_trigram_model = Phraser.load('wine_trigrams.pkl')
#food_trigram_model = Phraser.load('food_trigrams.pkl')
# Now for the most important part: leveraging existing wine theory, the work of others like <NAME>, wine descriptor mappings and the UC Davis wine wheel, the top 5000 most frequent wine terms were reviewed to (i) determine whether they are a descriptor that can be derived by blind tasting, and (ii) whether they are informative (judgments like 'tasty' and 'great' are not considered to be informative). The roughly 1000 descriptors that remain were then mapped onto a normalized descriptor, a category and a class:
# In[11]:
#descriptor_mapping = pd.read_csv('descriptor_mapping.csv', encoding='latin1').set_index('raw descriptor')
#def return_mapped_descriptor(word, mapping):
# if word in list(mapping.index):
# normalized_word = mapping.at[word, 'level_3']
# return normalized_word
# else:
# return word
#normalized_wine_sentences = []
#for sent in phrased_wine_sentences:
# normalized_wine_sentence = []
# for word in sent:
# normalized_word = return_mapped_descriptor(word, descriptor_mapping)
# normalized_wine_sentence.append(str(normalized_word))
# normalized_wine_sentences.append(normalized_wine_sentence)
# We will go through the same process for food, but without normalizing the nonaroma descriptors.
# In[12]:
aroma_descriptor_mapping = descriptor_mapping.loc[descriptor_mapping['type'] == 'aroma']
#print(aroma_descriptor_mapping)
normalized_food_sentences = []
for sent in phrased_food_sentences:
normalized_food_sentence = []
for word in sent:
normalized_word = return_mapped_descriptor(word, aroma_descriptor_mapping)
normalized_food_sentence.append(str(normalized_word))
normalized_food_sentences.append(normalized_food_sentence)
# Now, let's combine the wine dataset with our food dataset so we can train our embeddings. We want to make sure that the food and wine embeddings are calculated in the same feature space so that we can compute similarity vectors later on.
# In[13]:
normalized_sentences = normalized_wine_sentences + normalized_food_sentences
# In[98]:
normalized_sentences
# We are ready to train our Word2Vec model!
# In[14]:
#Changed by Praveen - vector_size and epochs added)
wine_word2vec_model = Word2Vec(normalized_sentences, vector_size=300, min_count=8, epochs=15)
print(wine_word2vec_model)
wine_word2vec_model.save('food_word2vec_model.bin')
# In[ ]:
wine_word2vec_model
# In[15]:
# if the word2vec model has already been trained, simply load it
wine_word2vec_model = Word2Vec.load("food_word2vec_model.bin")
# ### 2. Preprocessing our Wine Dataset
#
# We can now turn our attention to our wine dataset. Descriptions for a single wine are unlikely to contain sufficient information about all the nonaromas and aromas to yield consistent and reliable pairing recommendations. As such, we will produce recommendations at the grape variety & subregion level.
#
# First, let's normalize the names of the grape varieties in our dataset.
# In[16]:
variety_mapping = {'Shiraz': 'Syrah', 'Pinot Gris': 'Pinot Grigio', 'Pinot Grigio/Gris': 'Pinot Grigio',
'Garnacha, Grenache': 'Grenache', 'Garnacha': 'Grenache', 'Carmenère': 'Carmenere',
'Grüner Veltliner': 'Gruner Veltliner', 'Torrontés': 'Torrontes',
'Rhône-style Red Blend': 'Rhone-style Red Blend', 'Albariño': 'Albarino',
'Gewürztraminer': 'Gewurztraminer', 'Rhône-style White Blend': 'Rhone-style White Blend',
'Spätburgunder, Pinot Noir': 'Pinot Noir', 'Sauvignon, Sauvignon Blanc': 'Sauvignon Blanc',
'Pinot Nero, Pinot Noir': 'Pinot Noir', 'Malbec-Merlot, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Meritage, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Garnacha, Grenache': 'Grenache',
'Fumé Blanc': 'Sauvignon Blanc', 'Cabernet Sauvignon-Cabernet Franc, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Cabernet Merlot, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Cabernet Sauvignon-Merlot, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Cabernet Blend, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Malbec-Cabernet Sauvignon, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Merlot-<NAME>, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Merlot-<NAME>, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Cabernet Franc-Merlot, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Merlot-Malbec, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend',
'Cabernet, Bordeaux-style Red Blend': 'Bordeaux-style Red Blend', 'Primitivo, Zinfandel': 'Zinfandel',
'Aragonês, Tempranillo': 'Aragonez, Tempranillo'
}
def consolidate_varieties(variety_name):
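    # Collapse synonymous or accented grape variety names onto a single canonical spelling.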
if variety_name in variety_mapping:
return variety_mapping[variety_name]
else:
return variety_name
wine_df_clean = wine_dataframe.copy()
wine_df_clean['Variety'] = wine_df_clean['Variety'].apply(consolidate_varieties)
# Next, we need to define the set of geography subregions we will use to define our wines. Not too general, not too specific... just right.
# In[17]:
order_of_geographies = ['Subregion', 'Region', 'Province', 'Country']
# replace any nan values in the geography columns with the word none
def replace_nan_for_zero(value):
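    # Geography fields holding '0' or NaN are replaced with the placeholder string 'none'.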
if str(value) == '0' or str(value) == 'nan':
return 'none'
else:
return value
for o in order_of_geographies:
wine_df_clean[o] = wine_df_clean[o].apply(replace_nan_for_zero)
wine_df_clean.loc[:, order_of_geographies].fillna('none', inplace=True)
# In[18]:
variety_geo = wine_df_clean.groupby(['Variety', 'Country', 'Province', 'Region', 'Subregion']).size().reset_index().rename(columns={0:'count'})
variety_geo_sliced = variety_geo.loc[variety_geo['count'] > 1]
vgeos_df = pd.DataFrame(variety_geo_sliced, columns=['Variety', 'Country', 'Province', 'Region', 'Subregion', 'count'])
vgeos_df.to_csv('varieties_all_geos.csv')
# In[19]:
variety_geo_df = pd.read_csv('varieties_all_geos_normalized.csv', index_col=0)
wine_df_merged = pd.merge(left=wine_df_clean, right=variety_geo_df, left_on=['Variety', 'Country', 'Province', 'Region', 'Subregion'],
right_on=['Variety', 'Country', 'Province', 'Region', 'Subregion'])
#wine_df_merged.drop(['Unnamed: 0', 'Appellation', 'Bottle Size', 'Category', 'Country',
# 'Date Published', 'Designation', 'Importer', 'Province', 'Rating',
# 'Region', 'Reviewer', 'Reviewer Twitter Handle', 'Subregion', 'User Avg Rating', 'Winery', 'count'],
# axis=1, inplace=True)
wine_df_merged.shape
# We only want to keep wine types (location + variety) that appear frequently enough in our dataset.
# In[20]:
variety_geos = wine_df_merged.groupby(['Variety', 'geo_normalized']).size()
at_least_n_types = variety_geos[variety_geos > 30].reset_index()
wine_df_merged_filtered = pd.merge(wine_df_merged, at_least_n_types, left_on=['Variety', 'geo_normalized'], right_on=['Variety', 'geo_normalized'])
#wine_df_merged_filtered = wine_df_merged_filtered[['Name', 'Variety', 'geo_normalized', 'Description']]
wine_df_merged_filtered = wine_df_merged_filtered[['Variety', 'geo_normalized', 'Description']]
print(wine_df_merged_filtered.shape)
# Now, we will extract 7 vectors for every wine:
#
# - aroma vector (the aggregate of all the aroma descriptors in a wine)
# - nonaroma vectors (an aggregate vector for only aroma & non-aroma descriptors matching the core tastes below):
# - sweetness
# - acid
# - salt
# - piquant
# - fat
# - bitter
#
# In our descriptor file, we have defined which normalized descriptors pertain to each nonaroma.
# In[173]:
wine_reviews = list(wine_df_merged_filtered['Description'])
descriptor_mapping = pd.read_csv('descriptor_mapping_tastes - Copy.csv', encoding='latin1').set_index('raw descriptor')
core_tastes = ['aroma', 'weight', 'sweet', 'acid', 'salt', 'piquant', 'fat', 'bitter']
descriptor_mappings = dict()
for c in core_tastes:
if c=='aroma':
descriptor_mapping_filtered=descriptor_mapping.loc[descriptor_mapping['type']=='aroma']
else:
descriptor_mapping_filtered=descriptor_mapping.loc[descriptor_mapping['primary taste']==c]
descriptor_mappings[c] = descriptor_mapping_filtered
def return_descriptor_from_mapping(descriptor_mapping, word, core_taste):
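    # Look up the 'combined' taste descriptor for a term; return None if the term is not mapped.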
if word in list(descriptor_mapping.index):
descriptor_to_return = descriptor_mapping['combined'][word]
return descriptor_to_return
else:
return None
review_descriptors = []
for review in wine_reviews:
taste_descriptors = []
normalized_review = normalize_text(review)
phrased_review = wine_trigram_model[normalized_review]
#print("normalized_review : ", normalized_review)
for c in core_tastes:
descriptors_only = [return_descriptor_from_mapping(descriptor_mappings[c], word, c) for word in phrased_review]
no_nones = [str(d).strip() for d in descriptors_only if d is not None]
descriptorized_review = ' '.join(no_nones)
taste_descriptors.append(descriptorized_review)
review_descriptors.append(taste_descriptors)
# In[174]:
#print("Vector of word salinity : ", wine_word2vec_model.wv['salility'])
#np.zeros(1)
# Now we will take the list of descriptors for each wine and its aroma/nonaroma vectors and compute a TF-IDF weighted embedding for each. We will store the results in a dataframe.
# In[175]:
taste_descriptors = []
taste_vectors = []
for n, taste in enumerate(core_tastes):
print("taste : ",taste)
taste_words = [r[n] for r in review_descriptors]
#if taste == 'salt':
# print("salt taste_words : ", taste_words)
vectorizer = TfidfVectorizer()
X = vectorizer.fit(taste_words)
#print("Feature names : ", X.get_feature_names())
#print("IDF : ", X.idf_)
dict_of_tfidf_weightings = dict(zip(X.get_feature_names(), X.idf_))
#print("dict_of_tfidf_weightings keys : ", dict_of_tfidf_weightings.keys())
wine_review_descriptors = []
wine_review_vectors = []
for d in taste_words:
descriptor_count = 0
weighted_review_terms = []
terms = d.split(' ')
#if taste == 'salt':
# print("Salt terms : ", terms)
for term in terms:
if term in dict_of_tfidf_weightings.keys():
tfidf_weighting = dict_of_tfidf_weightings[term]
try:
#if taste == 'salt':
# print("------ tfidf_weighting : ", tfidf_weighting, " => ",term)
# print("vector without reshape : ", wine_word2vec_model.wv.get_vector(term))
word_vector = wine_word2vec_model.wv.get_vector(term).reshape(1, 300)
weighted_word_vector = tfidf_weighting * word_vector
weighted_review_terms.append(weighted_word_vector)
descriptor_count += 1
except:
if taste == 'bitter':
print("term : ", term)
traceback.print_exc()
continue
else:
continue
try:
#review_vector = ((sum(weighted_review_terms)/len(weighted_review_terms)).reshape(-1,1)).round(3)
review_vector = sum(weighted_review_terms)/len(weighted_review_terms)
review_vector = review_vector[0]
except:
#traceback.print_exc()
review_vector = np.nan
#if taste == 'salt' or taste == 'bitter':
# review_vector = np.ones(1)
# terms_and_vec = [terms, review_vector]
#if taste == 'acid':
# print("acid review_vector : ", review_vector)
wine_review_vectors.append(review_vector)
wine_review_descriptors.append(terms)
#if taste == 'salt':
# wine_review_vectors.reshape(1,300)
taste_vectors.append(wine_review_vectors)
taste_descriptors.append(wine_review_descriptors)
taste_vectors_t = list(map(list, zip(*taste_vectors)))
taste_descriptors_t = list(map(list, zip(*taste_descriptors)))
review_vecs_df = pd.DataFrame(taste_vectors_t, columns=core_tastes)
columns_taste_descriptors = [a + '_descriptors' for a in core_tastes]
review_descriptors_df = pd.DataFrame(taste_descriptors_t, columns=columns_taste_descriptors)
wine_df_vecs = pd.concat([wine_df_merged_filtered, review_descriptors_df, review_vecs_df], axis=1)
wine_df_vecs.head(5)
# If we don't have a nonaroma embedding for one of the wines, we will simply take the average nonaroma embedding for all the wines in the dataset.
# In[176]:
# pull the average embedding for the wine attribute across all wines.
avg_taste_vecs = dict()
for t in core_tastes:
# look at the average embedding for a taste, across all wines that have descriptors for that taste
review_arrays = wine_df_vecs[t].dropna()
#print("review_arrays : ", review_arrays)
average_taste_vec = np.average(review_arrays)
avg_taste_vecs[t] = average_taste_vec
# Now, let's find the average embedding for each type of wine (aromas and all nonaromas). We have defined the different types of wines by grape variety and geography, keeping only those with a sufficiently large sample size.
#
# For each variety, we will pull (i) a 300-dimensional aroma vector, and (ii) 7 non-aroma scalars.
# In[177]:
normalized_geos = list(set(zip(wine_df_vecs['Variety'], wine_df_vecs['geo_normalized'])))
def subset_wine_vectors(list_of_varieties, wine_attribute):
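    # Average the per-wine embeddings of one attribute for each (variety, geography) pair and
    # keep its 50 most frequent descriptors with their relative frequencies.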
wine_variety_vectors = []
for v in list_of_varieties:
one_var_only = wine_df_vecs.loc[(wine_df_vecs['Variety'] == v[0]) &
(wine_df_vecs['geo_normalized'] == v[1])]
if len(list(one_var_only.index)) < 1 or str(v[1][-1]) == '0':
continue
else:
taste_vecs = list(one_var_only[wine_attribute])
taste_vecs = [avg_taste_vecs[wine_attribute] if 'numpy' not in str(type(x)) else x for x in taste_vecs]
average_variety_vec = np.average(taste_vecs, axis=0)
descriptor_colname = wine_attribute + '_descriptors'
all_descriptors = [i[0] for i in list(one_var_only[descriptor_colname])]
word_freqs = Counter(all_descriptors)
most_common_words = word_freqs.most_common(50)
top_n_words = [(i[0], "{:.2f}".format(i[1]/len(taste_vecs))) for i in most_common_words]
top_n_words = [i for i in top_n_words if len(i[0])>2]
wine_variety_vector = [v, average_variety_vec, top_n_words]
wine_variety_vectors.append(wine_variety_vector)
return wine_variety_vectors
def pca_wine_variety(list_of_varieties, wine_attribute, pca=True):
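    # Reduce each variety's attribute embedding to a single scalar with 1-component PCA
    # (or keep the full vector when pca=False) and collect the associated descriptors.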
wine_var_vectors = subset_wine_vectors(normalized_geos, wine_attribute)
wine_varieties = [str(w[0]).replace('(', '').replace(')', '').replace("'", '').replace('"', '') for w in wine_var_vectors]
wine_var_vec = [w[1] for w in wine_var_vectors]
print("Type of wine_var_vec : ",type(wine_var_vec) , ", wine_attribute : ", wine_attribute)
#if pca and wine_attribute != 'salt' and wine_attribute != 'bitter':
if pca:
pca = PCA(1)
#below one line newly added by praveen(trying to resolve issue)
#wine_var_vec = np.array(wine_var_vec).reshape(-1, 1)
#print("wine_var_vec : ", wine_var_vec)
wine_var_vec = pca.fit_transform(wine_var_vec)
wine_var_vec = pd.DataFrame(wine_var_vec, index=wine_varieties)
else:
wine_var_vec = pd.Series(wine_var_vec, index=wine_varieties)
wine_var_vec.sort_index(inplace=True)
wine_descriptors = pd.DataFrame([w[2] for w in wine_var_vectors], index=wine_varieties)
wine_descriptors = pd.melt(wine_descriptors.reset_index(), id_vars='index')
wine_descriptors.sort_index(inplace=True)
return wine_var_vec, wine_descriptors
taste_dataframes = []
# generate the dataframe of aromas vectors as output,
print("normalized_geos Type1: ", type(normalized_geos))
aroma_vec, aroma_descriptors = pca_wine_variety(normalized_geos, 'aroma', pca=False)
taste_dataframes.append(aroma_vec)
#print(taste_dataframes)
print(core_tastes)
#print(normalized_geos)
# generate the dataframes of nonaroma scalars
for tw in core_tastes[1:]:
#TODO: below one line newly added by praveen, Pca was True, changed to False
print("normalized_geos Type: ", type(normalized_geos))
pca_w_dataframe, nonaroma_descriptors = pca_wine_variety(normalized_geos, tw, pca=True)
taste_dataframes.append(pca_w_dataframe)
# combine all the dataframes created above into one
all_nonaromas = | pd.concat(taste_dataframes, axis=1) | pandas.concat |
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
from pandas.testing import assert_frame_equal
from gators.feature_generation.polynomial_features import PolynomialFeatures
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data_inter():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_int16_inter():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.int16)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
).astype(np.int16)
return obj, X, X_expected
@ pytest.fixture
def data_all():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float32)
obj = PolynomialFeatures(
interaction_only=False, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4.],
[3., 4., 5., 9., 12., 15., 16., 20., 25.],
[6., 7., 8., 36., 42., 48., 49., 56., 64.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B',
'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C']
).astype(np.float32)
return obj, X, X_expected
@ pytest.fixture
def data_degree():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=False, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4., 0., 0.,
0., 0., 0., 0., 1., 2., 4., 8.],
[3., 4., 5., 9., 12., 15., 16., 20., 25., 27., 36.,
45., 48., 60., 75., 64., 80., 100., 125.],
[6., 7., 8., 36., 42., 48., 49., 56., 64., 216., 252.,
288., 294., 336., 384., 343., 392., 448., 512.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B', 'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C',
'A__x__A__x__A', 'A__x__A__x__B', 'A__x__A__x__C', 'A__x__B__x__B', 'A__x__B__x__C',
'A__x__C__x__C', 'B__x__B__x__B', 'B__x__B__x__C', 'B__x__C__x__C', 'C__x__C__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_inter_degree():
X = pd.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2., 0.],
[3., 4., 5., 12., 15., 20., 60.],
[6., 7., 8., 42., 48., 56., 336.]]),
columns=['A', 'B', 'C', 'A__x__B',
'A__x__C', 'B__x__C', 'A__x__B__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_subset():
X = pd.DataFrame(np.arange(12).reshape(
3, 4), columns=list('ABCD'), dtype=np.float64)
obj = PolynomialFeatures(
columns=['A', 'B', 'C'], interaction_only=True, degree=2).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 3., 0., 0., 2.],
[4., 5., 6., 7., 20., 24., 30.],
[8., 9., 10., 11., 72., 80., 90.]]),
columns=['A', 'B', 'C', 'D', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_inter_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
@pytest.fixture
def data_int16_inter_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.int16)
obj = PolynomialFeatures(
interaction_only=True, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2.],
[3., 4., 5., 12., 15., 20.],
[6., 7., 8., 42., 48., 56.]]),
columns=['A', 'B', 'C', 'A__x__B', 'A__x__C', 'B__x__C']
).astype(np.int16)
return obj, X, X_expected
@ pytest.fixture
def data_all_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float32)
obj = PolynomialFeatures(
interaction_only=False, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4.],
[3., 4., 5., 9., 12., 15., 16., 20., 25.],
[6., 7., 8., 36., 42., 48., 49., 56., 64.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B',
'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C']
).astype(np.float32)
return obj, X, X_expected
@ pytest.fixture
def data_degree_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=False, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 0., 1., 2., 4., 0., 0.,
0., 0., 0., 0., 1., 2., 4., 8.],
[3., 4., 5., 9., 12., 15., 16., 20., 25., 27., 36.,
45., 48., 60., 75., 64., 80., 100., 125.],
[6., 7., 8., 36., 42., 48., 49., 56., 64., 216., 252.,
288., 294., 336., 384., 343., 392., 448., 512.]]),
columns=['A', 'B', 'C', 'A__x__A', 'A__x__B', 'A__x__C', 'B__x__B', 'B__x__C', 'C__x__C',
'A__x__A__x__A', 'A__x__A__x__B', 'A__x__A__x__C', 'A__x__B__x__B', 'A__x__B__x__C',
'A__x__C__x__C', 'B__x__B__x__B', 'B__x__B__x__C', 'B__x__C__x__C', 'C__x__C__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_inter_degree_ks():
X = ks.DataFrame(np.arange(9).reshape(
3, 3), columns=list('ABC'), dtype=np.float64)
obj = PolynomialFeatures(
interaction_only=True, degree=3, columns=['A', 'B', 'C']).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 0., 0., 2., 0.],
[3., 4., 5., 12., 15., 20., 60.],
[6., 7., 8., 42., 48., 56., 336.]]),
columns=['A', 'B', 'C', 'A__x__B',
'A__x__C', 'B__x__C', 'A__x__B__x__C']
)
return obj, X, X_expected
@ pytest.fixture
def data_subset_ks():
X = ks.DataFrame(np.arange(12).reshape(
3, 4), columns=list('ABCD'), dtype=np.float64)
obj = PolynomialFeatures(
columns=['A', 'B', 'C'], interaction_only=True, degree=2).fit(X)
X_expected = pd.DataFrame(np.array(
[[0., 1., 2., 3., 0., 0., 2.],
[4., 5., 6., 7., 20., 24., 30.],
[8., 9., 10., 11., 72., 80., 90.]]),
columns=['A', 'B', 'C', 'D', 'A__x__B', 'A__x__C', 'B__x__C']
)
return obj, X, X_expected
def test_inter_pd(data_inter):
obj, X, X_expected = data_inter
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_inter_ks(data_inter_ks):
obj, X, X_expected = data_inter_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_inter_pd_np(data_inter):
obj, X, X_expected = data_inter
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
@pytest.mark.koalas
def test_inter_ks_np(data_inter_ks):
obj, X, X_expected = data_inter_ks
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
def test_int16_inter_pd(data_int16_inter):
obj, X, X_expected = data_int16_inter
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_int16_inter_ks(data_int16_inter_ks):
obj, X, X_expected = data_int16_inter_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_int16_inter_pd_np(data_int16_inter):
obj, X, X_expected = data_int16_inter
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
@pytest.mark.koalas
def test_int16_inter_ks_np(data_int16_inter_ks):
obj, X, X_expected = data_int16_inter_ks
X_new = obj.transform_numpy(X.to_numpy())
assert np.allclose(X_new, X_expected)
def test_all_pd(data_all):
obj, X, X_expected = data_all
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_all_ks(data_all_ks):
obj, X, X_expected = data_all_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_all_pd_np(data_all):
obj, X, X_expected = data_all
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_all_ks_np(data_all_ks):
obj, X, X_expected = data_all_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_degree_pd(data_degree):
obj, X, X_expected = data_degree
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_degree_ks(data_degree_ks):
obj, X, X_expected = data_degree_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_degree_pd_np(data_degree):
obj, X, X_expected = data_degree
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_degree_ks_np(data_degree_ks):
obj, X, X_expected = data_degree_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_inter_degree_pd(data_inter_degree):
obj, X, X_expected = data_inter_degree
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_inter_degree_ks(data_inter_degree_ks):
obj, X, X_expected = data_inter_degree_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_inter_degree_pd_np(data_inter_degree):
obj, X, X_expected = data_inter_degree
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_inter_degree_ks_np(data_inter_degree_ks):
obj, X, X_expected = data_inter_degree_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_subset_pd(data_subset):
obj, X, X_expected = data_subset
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_subset_ks(data_subset_ks):
obj, X, X_expected = data_subset_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_subset_pd_np(data_subset):
obj, X, X_expected = data_subset
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
#Library of functions called by SimpleBuildingEngine
import pandas as pd
import numpy as np
def WALLS(Btest=None):
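    # Geometry, surface areas, U-values and thermal capacities of the BESTEST case envelope.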
#Building height
h_building = 2.7#[m]
h_m_building = h_building / 2
    h_cl = 2.7# height of a storey
#number of walls
n_walls = 7
A_fl = 48
#WALLS CHARACTERISTICS
#Orientation
ori = pd.Series([('S'), ('W'), ('N'), ('E'), ('R'), ('F'), ('C')])
#Surface azimuth
surf_az = pd.Series([0, 90, 180 - 90, 0, 0, 0])
#Slopes (90:vertical; 0:horizontal)
slope = pd.Series([90, 90, 90, 90, 0, 0, 0])
#Masks
f_low_diff = pd.Series([1, 1, 1, 1, 1, 1, 1])
f_low_dir = pd.Series([1, 1, 1, 1, 1, 1, 1])
#U VALUES
U_hopw = pd.Series([0.5144, 0.5144, 0.5144, 0.5144, 0.3177, 0, 0])
U_lopw = pd.Series([3, 3, 3, 3, 3, 3, 3])
U_fr = pd.Series([2.4, 2.4, 2.4, 2.4, 2.4, 2.4, 2.4])
U_gl = pd.Series([3, 3, 3, 3, 3, 3, 3])
if (Btest == 195 or Btest == 395):
#SURFACES
#Heavy Opaque walls
A_hopw = pd.Series([21.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
elif (Btest == 200 or Btest == 210 or Btest == 230 or Btest == 240 or Btest == 250 or Btest == 400 or Btest == 410
or Btest == 420 or Btest == 430 or Btest == 800):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([12, 0, 0, 0, 0, 0, 0])
elif (Btest == 270 or Btest == 320 or Btest == 600 or Btest == 640 or Btest == 650 or Btest == 810 or Btest == 900
or Btest == 940 or Btest == 950 or Btest == 6001 or Btest == 9001 or Btest == 6501 or Btest == 9501):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([12, 0, 0, 0, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
elif (Btest == 300 or Btest == 620 or Btest == 920):
#Heavy Opaque walls
A_hopw = pd.Series([9.6, 16.2, 21.6, 16.2, 48, 48, 48])
#Windows
A_wd = pd.Series([0, 6, 0, 6, 0, 0, 0])
#Frame
FWR = pd.Series([0, 0, 0, 0, 0, 0, 0])
A_fr = FWR * A_wd
#Glazing
A_gl = A_wd - A_fr
#Light Opaque walls
A_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Total
A_hopw_t = A_hopw.sum()
A_wd_t = A_wd.sum()
A_fr_t = A_fr.sum()
A_lopw_t = A_lopw.sum()
A_gl_t = max(0, A_wd_t - A_fr_t)
A_t = A_hopw_t + A_lopw_t + A_wd_t + A_fr_t
#CAPACITIES
if (Btest == 800 or Btest == 900 or Btest == 920 or Btest == 940 or Btest == 950 or Btest == 9001 or Btest == 9501):
C_hopw = ([145154, 145154, 145154, 145154, 18170, 112121, 0])
C_lopw = ([0, 0, 0, 0, 0, 0, 0])
else:
C_hopw = ([14534, 14534, 14534, 14534, 18170, 19620, 0])
C_lopw = ([0, 0, 0, 0, 0, 0, 0])
C_m = sum((A_lopw * C_lopw + A_hopw * C_hopw))
#Effective mass area [m^2]
    A_m = C_m ** 2 / sum((A_lopw * np.square(C_lopw) + A_hopw * np.square(C_hopw)))
return n_walls, f_low_diff, f_low_dir, ori, surf_az, slope, A_t, A_fl, A_lopw_t, A_hopw_t, A_gl_t, A_fr_t, A_lopw,\
A_hopw, A_gl, h_cl, C_m, A_m, U_hopw, U_lopw, U_fr, U_gl
def w_t_RH(p_atm=None, t=None, RH=None):
from math import exp
#Humidity ratio as function of drybulb temperature and humidity ratio
p_w_s = exp((17.438 * t / (239.78 + t)) + 6.4147)#partial pressure of saturated water vapor
p_w = RH * p_w_s
w = (p_w * 0.62198) / (p_atm - p_w)
return w
def ZENITHANG(Lat=None, Long=None, Long_st=None, n=None, h=None):
from math import pi,cos,sin,acos
from numpy import fix
#ZENITH ANGLE
#Ref: Duffie,J.A.,<NAME>. 1980. Solar engineering of thermal
#processes. 2nd Edition. <NAME> & Sons.
#OUTPUTS
# -h_sol: Solar time (in hours)
# -h_sol_per: Solar time (in hours per day)
# -phi: Latitude in radians
# -delta: Declination angle in radians
# -omega: Hour angle in radians
# -theta_z: Zenith angle in radians, i.e. angle of incidence of beam radiation on a horizontal surface
#INPUTS
# -Lat: Latitude of the location (north positive) -90<Lat<90
# -Long: Longitude of the location (west positive) 0<Long<180
# -Long_st: Longitude of the standard meridian of the time zone
# -n: day 1<n<365
# -h: hour 1<h<8760
#Angles in radians%
phi = Lat * pi / 180
#Summer time correction (Masy, 2008)
epsilon_summer = 1
#Equation of time (minutes)
B = (n - 1) * 360 / 365 * pi / 180
E = 229.2 * (0.000075 + 0.001868 * cos(B) - 0.032077 * sin(B) - 0.014615 * cos(2 * B) - 0.04089 * sin(2 * B))
#Solar time (in hours)
h_sol = h + (4 * (Long_st - Long) + E) / 60 - epsilon_summer
#Solar time (in hours per day)
h_sol_per_1 = h_sol - 24 * fix(h_sol / 24)
if h_sol_per_1 <= 1E-6:
h_sol_per = 24
else:
h_sol_per = h_sol_per_1
#Declination (angular position of the sun at solar noon, north positive)
#-23.45<delta<23.45
delta = 23.45 * sin(360 * (284 + n) / 365 * pi / 180) * pi / 180#(daily basis, Cooper in Duffie & Beckmann)
#Hour angle (morning negative, afternoon positive)
omega = (h_sol_per - 12) * 15 * pi / 180
#Zenith angle (between the vertical and the line to the sun)
theta_z = max(1E-5, acos(cos(delta) * cos(phi) * cos(omega) + sin(delta) * sin(phi)))
return phi, delta, omega, theta_z, h_sol
def CSITH(Lat=None, Long=None, Long_st=None, n=None, h=None):
from math import cos,exp
#Clear sky solar radiation
#OUTPUTS
# -I_th_cs: Clear sky theoretical solar radiation (in W/m2)
#INPUTS
# -Lat: Latitude of the location (north positive) -90<Lat<90
# -Long: Longitude of the location (west positive) 0<Long<180
# -Long_st: Longitude of the standard meridian of the time zone
# -n: day 1<n<365
# -h: hour 1<h<8760
#Main angles and solar time for location
phi, delta, omega, theta_z, h_sol = ZENITHANG(Lat, Long, Long_st, n, h)
#Extraterrestrial radiation
G_sc = 1353#W/m2 - Solar constant
I_on = G_sc * (1 + 0.033 * cos(360 * (h_sol / 24) / 365))#Normal extraterrestrial radiation
#Atmospheric transmittance for beam radiation (altitude = 0m)
tau_b = 0.12814 + 0.7568875 * exp(-0.387225 / (cos(theta_z)))
#Clear sky beam normal radiation
I_cnb = I_on * tau_b
#Clear sky horizontal beam radiation
I_cb = I_cnb * cos(theta_z)
#Atmospheric transmittance for diffuse radiation (altitude = 0m)
tau_d = 0.271 - 0.294 * tau_b
#Clear sky horizontal diffuse radiation
I_cd = I_on * tau_d * cos(theta_z)
#Total horizontal clear sky radiation
I_th_cs = max(0, (I_cb + I_cd))
#Simplified calculation (G.Masy)
I_th_cs2 = max(0, (0.7 * I_on * cos(theta_z)))
return I_th_cs,I_th_cs2
def Btest_cases(Btest=None, h=None):
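    # Set points, internal gains, infiltration, solar/optical properties, shading
    # configuration and ventilation flow for the requested BESTEST case number.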
if (Btest == 195 or Btest == 200):
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 0
#infiltrations
ACH_inf = 0
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
epsilon_ir_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
epsilon_ir_gl = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Exterior natural lighting intensity for control of shadings
IAC_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Indoor solar Attenuation Coefficient (fraction of SHGC with solar shadings)
f_c_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Convective fraction of solar gains with solar shadings
#Ventilation
V_dot_vent = 0
elif Btest == 210 or Btest == 220:
#set points
T_i_set_h = 20
T_i_set_c = 20
#internal gain
Q_dot_appl = 0
#infiltrations
ACH_inf = 0
#SOLAR PROPERTIES
#SHGC
SHGC_gl_0 = pd.Series([0.789, 0.789, 0.789, 0.789, 0.789, 0, 0])
#IR emittance
epsilon_ir_hopw = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
epsilon_ir_lopw = pd.Series([0, 0, 0, 0, 0, 0, 0])
epsilon_ir_gl = pd.Series([0.9, 0.9, 0.9, 0.9, 0.9, 0, 0])
#Solar absorbance
alpha_hopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
alpha_lopw = pd.Series([0.1, 0.1, 0.1, 0.1, 0.1, 0, 0])
#Solar Shadings
e_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #0=no solar shading; 1=interior solar shadings; 2=exterior solar shadings
mode_solshad = pd.Series([1, 1, 1, 1, 1, 0, 0]) #1=manual solar shadings; 2=automatic solar shadings
NL_ext_max = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Exterior natural lighting intensity for control of shadings
IAC_solshad = pd.Series([0, 0, 0, 0, 0, 0, 0]) #Indoor solar Attenuation Coefficient (fraction of SHGC with solar shadings)
f_c_solshad = | pd.Series([0, 0, 0, 0, 0, 0, 0]) | pandas.Series |
#!/usr/bin/python
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from bs4 import BeautifulSoup
import pandas as pd
import time
row_list = list()
columns = ['date','rank','team','points']
driver = webdriver.Chrome("/Library/Frameworks/Python.framework/Versions/3.9/bin/chromedriver")
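# Each id selects one published FIFA ranking edition in the ranking-table URL.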
ids = ['12252', '12280', '12315', '12350', '12385', '12406', '12455', '12511', '12582', '12623',
'12679', '12714', '12749', '12770', '12833', '12882', '12883', '12884', '13043', '13078', '13113', '13127',
'13197', '13245', '13295']
for id in ids:
url = "https://www.fifa.com/fifa-world-ranking/ranking-table/men/rank/id"+id+"/#all"
print(url)
driver.get(url)
content = driver.page_source
soup = BeautifulSoup(content, "html.parser")
index = 0
date = soup.find('div', {"class": "fi-selected-item"}).text
try:
button = driver.find_element_by_xpath("//button[@id='onetrust-accept-btn-handler']")
picture = button.click()
except NoSuchElementException:
print("Error ?")
links = driver.find_elements_by_xpath("//ul[@class='pagination']//li/a")
print("links = ", links)
print("links = ", len(links))
count_links = range(1, len(links)+1)
print("count_links = ", count_links)
for i in count_links:
print("i = ", i)
try:
if(i > 1):
link = driver.find_element_by_xpath("//ul[@class='pagination']/li["+str(i)+"]/a")
actions = ActionChains(driver)
actions.move_to_element(link).perform()
picture = link.click()
#time.sleep(5)
except NoSuchElementException:
break
content = driver.page_source
soup = BeautifulSoup(content, "html.parser")
table = soup.find('table', {"id": "rank-table"})
rows = table.find('tbody').find_all('tr')
for tr in rows:
rank = tr.find('td', {"class": "fi-table__rank"}).text
#print("rank = ", rank)
team = tr.find('td', {"class": "fi-table__teamname"}).find('span', {"class": "fi-t__nText"}).text
#print("team = ", team)
points = tr.find('td', {"class": "fi-table__points"}).text
#print("points = ", points)
row = [date, rank, team, points]
if(len(row) == 4):
row_list.append(row)
df = pd.DataFrame(row_list, columns=columns)
print(df.shape)
df.to_csv('fifa_rankings_all.csv', index=False, encoding='utf-8-sig')
index += 1
df = | pd.DataFrame(row_list,columns=columns) | pandas.DataFrame |
import logging
import numpy as np
import pandas as pd
from msiwarp.util.warp import to_mz, to_height, to_mx_peaks, generate_mean_spectrum
from msi_recal.join_by_mz import join_by_mz
from msi_recal.math import (
mass_accuracy_bounds,
weighted_stddev,
peak_width,
mass_accuracy_bound_indices,
)
from msi_recal.params import AnalyzerType
logger = logging.getLogger(__name__)
def _get_mean_spectrum(
mx_spectra: np.ndarray,
analyzer: AnalyzerType,
sigma_1: float,
):
tics = np.array([np.sum(to_height(s)) for s in mx_spectra])
# min_mz = np.floor(np.min([s[0].mz for s in mx_spectra if len(s)]))
# max_mz = np.ceil(np.max([s[-1].mz for s in mx_spectra if len(s)]))
min_mz = np.floor(np.min([np.min(to_mz(s)) for s in mx_spectra if len(s)]))
max_mz = np.ceil(np.max([np.max(to_mz(s)) for s in mx_spectra if len(s)]))
# MSIWarp's generate_mean_spectrum needs a temporary array to store a fuzzy histogram of peaks
# with a distribution function that ensures the peak width is a constant number of bins
# throughout the m/z range. The formula for this is different for each analyzer.
# n_points specifies how big the temporary array should be. If it's set too low, the function
# silently fails. If it's set too high, it takes longer to run and there are console warnings.
# Predict the required number of n_points so that neither of these conditions are hit.
# A buffer of 10% + 1000 is added to compensate for numerical error
exp = {'tof': 1, 'orbitrap': 1.5, 'ft-icr': 2}[analyzer]
density_samples = np.linspace(min_mz, max_mz, 100) ** exp * 0.25 * sigma_1
n_points = int(
(max_mz - min_mz) / np.average(density_samples, weights=1 / density_samples) * 1.1 + 1000
)
return generate_mean_spectrum(
mx_spectra,
n_points,
sigma_1,
min_mz,
max_mz,
tics,
analyzer,
stride=1,
)
def make_spectra_df(spectra):
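    # Flatten (spectrum_index, mzs, intensities) tuples into one m/z-sorted DataFrame.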
return pd.DataFrame(
{
'sp_i': np.concatenate(
[np.full(len(mzs), sp_i, dtype=np.uint32) for sp_i, mzs, ints in spectra]
),
'mz': np.concatenate([mzs for sp_i, mzs, ints in spectra]),
'ints': np.concatenate([ints for sp_i, mzs, ints in spectra]),
}
).sort_values('mz')
def representative_spectrum(
spectra_df: pd.DataFrame,
mean_spectrum: pd.DataFrame,
analyzer: AnalyzerType,
sigma_1: float,
denoise=False,
):
"""Finds the single spectrum that is most similar to the mean spectrum"""
orig_mean_spectrum = mean_spectrum
if denoise:
# Exclude peaks that only exist in small number of spectra, have high m/z variability
# (which suggests that multiple peaks were grouped together), or are near other more
# intense peaks
# mean_spectrum = mean_spectrum[mean_spectrum.n_hits > 1]
_ints = mean_spectrum.ints.values
_mz = mean_spectrum.mz.values
local_lo, local_hi = mass_accuracy_bound_indices(_mz, _mz, analyzer, sigma_1 * 2)
local_maximum_score = np.array(
[
lo >= hi - 1 or i == lo + np.argmax(_ints[lo:hi])
for i, (lo, hi) in enumerate(zip(local_lo, local_hi))
]
)
peak_score = (
mean_spectrum.coverage
* (0.1 + local_maximum_score)
* (1 - np.clip(mean_spectrum.mz_stddev / mean_spectrum.mz_tol, 0, 1))
)
mean_spectrum = sample_across_mass_range(mean_spectrum, peak_score, n_per_bin=500)
logger.debug(
f'Denoising reduced peaks from {len(orig_mean_spectrum)} to {len(mean_spectrum)}'
)
# Find the spectrum that's most similar to the background spectrum
mean_spectrum = mean_spectrum.rename(columns={'mz': 'mean_mz', 'ints': 'mean_ints'})
spectrum_scores = {}
processed_spectra = {}
for sp, grp in spectra_df.groupby('sp'):
joined = join_by_mz(mean_spectrum, 'mean_mz', grp, 'mz', analyzer, sigma_1, how='left')
mz_tol = peak_width(joined.mz, analyzer, sigma_1) / 2
joined['mz_err'] = np.clip((joined.mean_mz - joined.mz.fillna(0)) / mz_tol, -1, 1)
a = joined.mean_ints
b = joined.ints.fillna(0)
mz_err = max(joined.mz_err.abs().sum(), 0.0001)
# score = cosine_similarity(mean_ints, ints) / mz_err.sum()
spectrum_scores[sp] = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)) / mz_err
if denoise:
processed_spectra[sp] = joined[['sp', 'mz', 'ints']][~joined.ints.isna()]
else:
processed_spectra[sp] = grp
# Return the best scoring spectrum
best_sp = pd.Series(spectrum_scores).idxmax()
logger.debug(f'Choose representative spectrum: {best_sp}')
return processed_spectra[best_sp].sort_values('mz')
def hybrid_mean_spectrum(spectra_df, analyzer, sigma_1, min_coverage=0):
from msiwarp.util.warp import to_mz
if not spectra_df.mz.is_monotonic_increasing:
spectra_df = spectra_df.sort_values('mz')
n_spectra = spectra_df.sp.nunique()
mx_spectra = [
to_mx_peaks(grp.mz, grp.ints, sigma_1, sp, analyzer) for sp, grp in spectra_df.groupby('sp')
]
logger.debug(f'Converted {sum(map(len, mx_spectra))} peaks to mx.peak')
mean_spectrum = _get_mean_spectrum(mx_spectra, analyzer, sigma_1)
mean_spectrum_df = pd.DataFrame(
{'mz': to_mz(mean_spectrum), 'ints': np.float32(to_height(mean_spectrum))}
).sort_values('mz')
logger.debug(f'MSIWarp generate_mean_spectrum returned {len(mean_spectrum_df)} peaks')
lo_mzs, hi_mzs = mass_accuracy_bounds(mean_spectrum_df.mz.values, analyzer, sigma_1)
lo_idxs = np.searchsorted(spectra_df.mz, lo_mzs, 'left')
hi_idxs = np.searchsorted(spectra_df.mz, hi_mzs, 'right')
results = []
for lo_idx, hi_idx, mz_tol, mx_mz, mx_ints, lo_mz, hi_mz in zip(
lo_idxs,
hi_idxs,
hi_mzs - lo_mzs,
mean_spectrum_df.mz,
mean_spectrum_df.ints,
lo_mzs,
hi_mzs,
):
# if np.abs(mx_mz - 211.010248) < 0.005:
# print(lo_idx, hi_idx, mz_tol, mx_mz, mx_ints, lo_mz, hi_mz)
# sp_ids = spectra_df.sp.iloc[lo_idx:hi_idx].unique()
# print(f'sp_ids ({len(sp_ids)}):', sp_ids)
# print('n_spectra:', n_spectra)
if hi_idx != lo_idx and hi_idx - lo_idx >= n_spectra * min_coverage:
n_hits = spectra_df.sp.iloc[lo_idx:hi_idx].nunique()
if n_hits >= n_spectra * min_coverage:
mzs = spectra_df.mz.iloc[lo_idx:hi_idx]
ints = spectra_df.ints.iloc[lo_idx:hi_idx]
mz_mean, mz_stddev = weighted_stddev(mzs, ints)
ints_mean = sum(ints) / n_spectra
results.append(
{
'mz': mz_mean,
'mz_stddev': mz_stddev,
'mz_mx': mx_mz,
'mz_tol': mz_tol,
'ints': ints_mean,
'ints_stddev': np.sqrt(np.average((ints - ints_mean) ** 2)),
'ints_mx': mx_ints,
'coverage': n_hits / n_spectra,
'n_hits': n_hits,
}
)
logger.debug(f'Hybrid_mean_spectrum returned {len(results)} peaks (sigma_1: {sigma_1})')
return pd.DataFrame(results)
def sample_across_mass_range(spectrum: pd.DataFrame, scores, n_bins=4, n_per_bin=250):
"""For ensuring an even distribution of peaks across the mass range, get split the mass range
into `n_bins` even bins and take the highest-scored `n_per_bin` from each bin."""
assert len(spectrum) == len(scores)
bin_edges = np.histogram_bin_edges(spectrum.mz.values, n_bins)
bins = []
for i in range(n_bins):
bin_mask = spectrum.mz.between(bin_edges[i], bin_edges[i + 1])
idxs = np.argsort(scores[bin_mask])[-n_per_bin:]
logger.debug(f'Chose {len(idxs)} peaks from {bin_edges[i]:.0f}-{bin_edges[i + 1]:.0f}')
bins.append(spectrum[bin_mask].iloc[idxs])
return | pd.concat(bins) | pandas.concat |
import pandas as pd
import numpy as np
import os
import subprocess
def boaDataMake(X, Y, xlabels, ylabel):
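# Note: one-hot encodes each categorical column of X into indicator columns named
# '<label>_<category>', working inside a ./temp directory, presumably to write the
# expanded design matrix and labels out for an external BOA (Bayesian rule set) run.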
wd = os.getcwd()
if not os.path.isdir('temp'):
os.mkdir('temp')
os.chdir('temp')
f = open('X.txt', 'w+')
xdic = {}
X = pd.DataFrame(X, columns = xlabels)
for label in X:
for cat in set(X[label]):
key = label + '_' + str(cat)
xdic[key] = []
for i in X[label]:
if i == cat:
xdic[key].append(1)
else:
xdic[key].append(0)
X = | pd.DataFrame.from_dict(xdic) | pandas.DataFrame.from_dict |
"""Handle the raw data input/output and interface with external formats."""
from obspy.core import read
from obspy.core.utcdatetime import UTCDateTime
import pandas as pd
import datetime as dt
def load_stream(path):
"""Loads a Stream object from the file at path.
Args:
path: path to the input file, (for supported formats see,
http://docs.obspy.org/tutorial/code_snippets/reading_seismograms.html)
Returns:
an obspy.core.Stream object
(http://docs.obspy.org/packages/autogen/obspy.core.stream.Stream.html#obspy.core.stream.Stream)
"""
stream = read(path)
stream.merge()
# assert len(stream) == 3 # We need X,Y,Z traces
return stream
def load_catalog(path):
"""Loads a event catalog from a .csv file.
Each row in the catalog references a know seismic event.
Args:
path: path to the input .csv file.
Returns:
catalog: A Pandas dataframe.
"""
catalog = pd.read_csv(path)
# Check if utc_timestamp exists, otherwise create it
if 'utc_timestamp' not in catalog.columns:
utc_timestamp = []
for e in catalog.origintime.values:
utc_timestamp.append(UTCDateTime(e).timestamp)
catalog['utc_timestamp'] = utc_timestamp
return catalog
def write_stream(stream, path):
stream.write(path, format='MSEED')
def write_catalog(events, path):
catalog = pd.DataFrame(
{'utc_timestamp': pd.Series([t.timestamp for t in events])})
catalog.to_csv(path)
def write_catalog_with_clusters(events, clusters, latitudes, longitudes, depths, path):
catalog = pd.DataFrame(
{'utc_timestamp': pd.Series([t for t in events]),
"cluster_id": pd.Series([cluster_id for cluster_id in clusters]),
"latitude": | pd.Series([lat for lat in latitudes]) | pandas.Series |
import sys
import pandas as pd
import scipy
import numpy as np
from datetime import datetime, timedelta
co_df = | pd.read_csv(sys.argv[1]) | pandas.read_csv |
import nose
import pandas
from pandas.compat import u
from pandas.util.testing import network
from pandas.util.testing import assert_frame_equal
from numpy.testing.decorators import slow
from pandas.io.wb import search, download, get_countries
import pandas.util.testing as tm
class TestWB(tm.TestCase):
@slow
@network
def test_wdi_search(self):
raise nose.SkipTest
expected = {u('id'): {2634: u('GDPPCKD'),
4649: u('NY.GDP.PCAP.KD'),
4651: u('NY.GDP.PCAP.KN'),
4653: u('NY.GDP.PCAP.PP.KD')},
u('name'): {2634: u('GDP per Capita, constant US$, '
'millions'),
4649: u('GDP per capita (constant 2000 US$)'),
4651: u('GDP per capita (constant LCU)'),
4653: u('GDP per capita, PPP (constant 2005 '
'international $)')}}
result = search('gdp.*capita.*constant').ix[:, :2]
expected = pandas.DataFrame(expected)
expected.index = result.index
assert_frame_equal(result, expected)
@slow
@network
def test_wdi_download(self):
raise nose.SkipTest
expected = {'GDPPCKN': {(u('United States'), u('2003')): u('40800.0735367688'), (u('Canada'), u('2004')): u('37857.1261134552'), (u('United States'), u('2005')): u('42714.8594790102'), (u('Canada'), u('2003')): u('37081.4575704003'), (u('United States'), u('2004')): u('41826.1728310667'), (u('Mexico'), u('2003')): u('72720.0691255285'), (u('Mexico'), u('2004')): u('74751.6003347038'), (u('Mexico'), u('2005')): u('76200.2154469437'), (u('Canada'), u('2005')): u('38617.4563629611')}, 'GDPPCKD': {(u('United States'), u('2003')): u('40800.0735367688'), (u('Canada'), u('2004')): u('34397.055116118'), (u('United States'), u('2005')): u('42714.8594790102'), (u('Canada'), u('2003')): u('33692.2812368928'), (u('United States'), u('2004')): u('41826.1728310667'), (u('Mexico'), u('2003')): u('7608.43848670658'), (u('Mexico'), u('2004')): u('7820.99026814334'), (u('Mexico'), u('2005')): u('7972.55364129367'), (u('Canada'), u('2005')): u('35087.8925933298')}}
expected = pandas.DataFrame(expected)
result = download(country=['CA', 'MX', 'US', 'junk'], indicator=['GDPPCKD',
'GDPPCKN', 'junk'], start=2003, end=2005)
expected.index = result.index
assert_frame_equal(result, | pandas.DataFrame(expected) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This module contains the ReadSets class that is in charge
of reading the sets files, reshaping them to be used in
the build class, creating and reading the parameter files and
checking the errors in the definition of the sets and parameters
"""
import itertools as it
from openpyxl import load_workbook
import pandas as pd
from hypatia.error_log.Checks import (
check_nan,
check_index,
check_index_data,
check_table_name,
check_mapping_values,
check_mapping_ctgry,
check_sheet_name,
check_tech_category,
check_carrier_type,
check_years_mode_consistency,
)
from hypatia.error_log.Exceptions import WrongInputMode
import numpy as np
from hypatia.utility.constants import (
global_set_ids,
regional_set_ids,
technology_categories,
carrier_types,
)
from hypatia.utility.constants import take_trade_ids, take_ids, take_global_ids
MODES = ["Planning", "Operation"]
class ReadSets:
""" Class that reads the sets of the model, creates the parameter files with
default values and reads the filled parameter files
Attributes
------------
mode:
The mode of optimization including the operation and planning mode
path:
The path of the set files given by the user
glob_mapping : dict
A dictionary of the global set tables given by the user in the global.xlsx file
mapping : dict
A dictionary of the regional set tables given by the user in the regional
set files
connection_sheet_ids: dict
A nested dictionary that defines the sheet names of the parameter file of
the inter-regional links with their default values, indices and columns
global_sheet_ids : dict
A nested dictionary that defines the sheet names of the global parameter file
with their default values, indices and columns
regional_sheets_ids : dict
A nested dictionary that defines the sheet names of the regional parameter files
with their default values, indices and columns
trade_data : dict
A nested dictionary for storing the inter-regional link data
global_data : dict
A nested dictionary for storing the global data
data : dict
A nested dictionary for storing the regional data
"""
def __init__(self, path, mode="Planning"):
self.mode = mode
self.path = path
self._init_by_xlsx()
def _init_by_xlsx(self,):
"""
Reads and organizes the global and regional sets
"""
glob_mapping = {}
wb_glob = load_workbook(r"{}/global.xlsx".format(self.path))
sets_glob = wb_glob["Sets"]
set_glob_category = {key: value for key, value in sets_glob.tables.items()}
for entry, data_boundary in sets_glob.tables.items():
data_glob = sets_glob[data_boundary]
content = [[cell.value for cell in ent] for ent in data_glob]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
glob_mapping[entry] = df
self.glob_mapping = glob_mapping
check_years_mode_consistency(
mode=self.mode, main_years=list(self.glob_mapping["Years"]["Year"])
)
for key, value in self.glob_mapping.items():
check_table_name(
file_name="global",
allowed_names=list(global_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, "global", pd.Index(global_set_ids[key]))
check_nan(key, value, "global")
if key == "Technologies":
check_tech_category(value, technology_categories, "global")
if key == "Carriers":
check_carrier_type(value, carrier_types, "global")
self.regions = list(self.glob_mapping["Regions"]["Region"])
self.main_years = list(self.glob_mapping["Years"]["Year"])
if "Timesteps" in self.glob_mapping.keys():
self.time_steps = list(self.glob_mapping["Timesteps"]["Timeslice"])
self.timeslice_fraction = self.glob_mapping["Timesteps"][
"Timeslice_fraction"
].values
else:
self.time_steps = ["Annual"]
self.timeslice_fraction = np.ones((1, 1))
# possible connections among the regions
if len(self.regions) > 1:
lines_obj = it.permutations(self.regions, r=2)
self.lines_list = []
for item in lines_obj:
if item[0] < item[1]:
self.lines_list.append("{}-{}".format(item[0], item[1]))
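# e.g. regions ['reg1', 'reg2'] yield a single line labelled 'reg1-reg2'; keeping only
# the alphabetically ordered pair avoids a duplicate 'reg2-reg1' entry.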
mapping = {}
for reg in self.regions:
wb = load_workbook(r"{}/{}.xlsx".format(self.path, reg))
sets = wb["Sets"]
self._setbase_reg = [
"Technologies",
"Carriers",
"Carrier_input",
"Carrier_output",
]
set_category = {key: value for key, value in sets.tables.items()}
reg_mapping = {}
for entry, data_boundary in sets.tables.items():
data = sets[data_boundary]
content = [[cell.value for cell in ent] for ent in data]
header = content[0]
rest = content[1:]
df = pd.DataFrame(rest, columns=header)
reg_mapping[entry] = df
mapping[reg] = reg_mapping
for key, value in mapping[reg].items():
check_table_name(
file_name=reg,
allowed_names=list(regional_set_ids.keys()),
table_name=key,
)
check_index(value.columns, key, reg, pd.Index(regional_set_ids[key]))
check_nan(key, value, reg)
if key == "Technologies":
check_tech_category(value, technology_categories, reg)
if key == "Carriers":
check_carrier_type(value, carrier_types, reg)
if key == "Carrier_input" or key == "Carrier_output":
check_mapping_values(
value,
key,
mapping[reg]["Technologies"],
"Technologies",
"Technology",
"Technology",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_in",
"Carrier",
reg,
)
check_mapping_values(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Carriers"],
"Carriers",
"Carrier_out",
"Carrier",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_input"],
"Carrier_input",
mapping[reg]["Technologies"],
"Supply",
reg,
)
check_mapping_ctgry(
mapping[reg]["Carrier_output"],
"Carrier_output",
mapping[reg]["Technologies"],
"Demand",
reg,
)
self.mapping = mapping
Technologies = {}
for reg in self.regions:
regional_tech = {}
for key in list(self.mapping[reg]["Technologies"]["Tech_category"]):
regional_tech[key] = list(
self.mapping[reg]["Technologies"].loc[
self.mapping[reg]["Technologies"]["Tech_category"] == key
]["Technology"]
)
Technologies[reg] = regional_tech
self.Technologies = Technologies
self._create_input_data()
def _create_input_data(self):
"""
Defines the sheets, indices and columns of the parameter files
"""
if len(self.regions) > 1:
# Create the columns of inter-regional links as a multi-index of the
# pairs of regions and the transmitted carriers
indexer = pd.MultiIndex.from_product(
[self.lines_list, self.glob_mapping["Carriers_glob"]["Carrier"]],
names=["Line", "Transmitted Carrier"],
)
self.connection_sheet_ids = {
"F_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"V_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Residual_capacity": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Capacity_factor_line": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Line_efficiency": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"AnnualProd_perunit_capacity": {
"value": 1,
"index": pd.Index(
["AnnualProd_Per_UnitCapacity"], name="Performance Parameter"
),
"columns": indexer,
},
}
self.global_sheet_ids = {
"Max_production_global": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Min_production_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
(
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
)
& (
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Storage"
)
]["Technology"],
},
"Glob_emission_cap_annual": {
"value": 1e30,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Global Emission Cap"],
},
}
if self.mode == "Planning":
self.connection_sheet_ids.update(
{
"INV": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Decom_cost": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_totalcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Max_totalcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Min_newcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Max_newcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer,
},
"Line_lifetime": {
"value": 1,
"index": pd.Index(
["Technical Life Time"], name="Performance Parameter"
),
"columns": indexer,
},
"Line_Economic_life": {
"value": 1,
"index": pd.Index(
["Economic Life time"], name="Performance Parameter"
),
"columns": indexer,
},
"Interest_rate": {
"value": 0.05,
"index": pd.Index(
["Interest Rate"], name="Performance Parameter"
),
"columns": indexer,
},
}
)
self.global_sheet_ids.update(
{
"Min_totalcap_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
]["Technology"],
},
"Max_totalcap_global": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
]["Technology"],
},
"Min_newcap_global": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
]["Technology"],
},
"Max_newcap_global": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": self.glob_mapping["Technologies_glob"].loc[
self.glob_mapping["Technologies_glob"]["Tech_category"]
!= "Demand"
]["Technology"],
},
"Discount_rate": {
"value": 0.05,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Annual Discount Rate"],
},
}
)
self.regional_sheets_ids = {}
indexer_reg = {}
indexer_reg_drop1 = {}
indexer_reg_drop2 = {}
add_indexer = {}
conversion_plus_indexin = {}
conversion_plus_indexout = {}
# Creates the columns of the carrier_ratio_in and carrier_ratio_out sheets
# by finding the conversion plus technologies and their input and output carriers
for reg in self.regions:
if "Conversion_plus" in self.Technologies[reg].keys():
take_carrierin = [
self.mapping[reg]["Carrier_input"]
.loc[self.mapping[reg]["Carrier_input"]["Technology"] == tech][
"Carrier_in"
]
.values
for tech in self.Technologies[reg]["Conversion_plus"]
]
take_carrierin_ = [
carr
for index, value in enumerate(take_carrierin)
for carr in take_carrierin[index]
]
take_technologyin = [
self.mapping[reg]["Carrier_input"]
.loc[self.mapping[reg]["Carrier_input"]["Technology"] == tech][
"Technology"
]
.values
for tech in self.Technologies[reg]["Conversion_plus"]
]
take_technologyin_ = [
tech
for index, value in enumerate(take_technologyin)
for tech in take_technologyin[index]
]
take_carrierout = [
self.mapping[reg]["Carrier_output"]
.loc[self.mapping[reg]["Carrier_output"]["Technology"] == tech][
"Carrier_out"
]
.values
for tech in self.Technologies[reg]["Conversion_plus"]
]
take_carrierout_ = [
carr
for index, value in enumerate(take_carrierout)
for carr in take_carrierout[index]
]
take_technologyout = [
self.mapping[reg]["Carrier_output"]
.loc[self.mapping[reg]["Carrier_output"]["Technology"] == tech][
"Technology"
]
.values
for tech in self.Technologies[reg]["Conversion_plus"]
]
take_technologyout_ = [
tech
for index, value in enumerate(take_technologyout)
for tech in take_technologyout[index]
]
conversion_plus_indexin[reg] = pd.MultiIndex.from_arrays(
[take_technologyin_, take_carrierin_],
names=["Tech_category", "Technology"],
)
conversion_plus_indexout[reg] = pd.MultiIndex.from_arrays(
[take_technologyout_, take_carrierout_],
names=["Tech_category", "Technology"],
)
# Creates the columns of the technology-specific parameter files
# based on the technology categories and the technologies within each
# category
dict_ = self.Technologies[reg]
level1 = []
level2 = []
for key, values in dict_.items():
if key != "Demand":
for value in values:
level1.append(key)
level2.append(value)
indexer_reg[reg] = pd.MultiIndex.from_arrays(
[level1, level2], names=["Tech_category", "Technology"]
)
if "Storage" in self.Technologies[reg].keys():
indexer_reg_drop1[reg] = indexer_reg[reg].drop("Storage", level=0)
else:
indexer_reg_drop1[reg] = indexer_reg[reg]
if "Transmission" in self.Technologies[reg].keys():
indexer_reg_drop2[reg] = indexer_reg_drop1[reg].drop(
"Transmission", level=0
)
else:
indexer_reg_drop2[reg] = indexer_reg_drop1[reg]
level1_ = level1 * 2
level2_ = level2 * 2
tax = []
sub = []
for tech in level2:
tax.append("Tax")
sub.append("Sub")
taxsub = tax + sub
add_indexer[reg] = pd.MultiIndex.from_arrays(
[taxsub, level1_, level2_],
names=["Taxes or Subsidies", "Tech_category", "Technology"],
)
self.regional_sheets_ids[reg] = {
"F_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"V_OM": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Residual_capacity": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Max_production": {
"value": 1e20,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop2[reg],
},
"Min_production": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop2[reg],
},
"Capacity_factor_tech": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Tech_efficiency": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop1[reg],
},
"Specific_emission": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop2[reg],
},
"Carbon_tax": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg_drop2[reg],
},
"Fix_taxsub": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": add_indexer[reg],
},
"Emission_cap_annual": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Emission Cap"],
},
"AnnualProd_perunit_capacity": {
"value": 1,
"index": pd.Index(
["AnnualProd_Per_UnitCapacity"], name="Performance Parameter"
),
"columns": indexer_reg[reg],
},
}
if self.mode == "Planning":
self.regional_sheets_ids[reg].update(
{
"INV": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Investment_taxsub": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": add_indexer[reg],
},
"Decom_cost": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Min_totalcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Max_totalcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Min_newcap": {
"value": 0,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Max_newcap": {
"value": 1e10,
"index": pd.Index(self.main_years, name="Years"),
"columns": indexer_reg[reg],
},
"Discount_rate": {
"value": 0.05,
"index": pd.Index(self.main_years, name="Years"),
"columns": ["Annual Discount Rate"],
},
"Tech_lifetime": {
"value": 1,
"index": pd.Index(
["Technical Life time"], name="Performance Parameter"
),
"columns": indexer_reg[reg],
},
"Economic_lifetime": {
"value": 1,
"index": pd.Index(
["Economic Life time"], name="Performance Parameter"
),
"columns": indexer_reg[reg],
},
"Interest_rate": {
"value": 0.05,
"index": pd.Index(
["Interest Rate"], name="Performance Parameter"
),
"columns": indexer_reg[reg],
},
}
)
if "Storage" in self.Technologies[reg].keys():
self.regional_sheets_ids[reg].update(
{
"Storage_charge_efficiency": {
"value": 1,
"index": pd.Index(self.main_years, name="Years"),
"columns": pd.Index(
self.Technologies[reg]["Storage"], name="Technology"
),
},
"Storage_discharge_efficiency": {
"value": 1,
"index": | pd.Index(self.main_years, name="Years") | pandas.Index |
from boonai.project.site.helper import extract_section, get_html_pagination_params
from flask import Blueprint, render_template, request, redirect, current_app
from flask import url_for, session, flash
from flask_uploads import UploadSet
from flask_user import login_required, current_user
from wtforms import StringField, SelectField, SelectMultipleField, SubmitField, BooleanField
from wtforms.validators import Length
from flask_wtf import FlaskForm
import requests
from pandas import DataFrame
from os.path import join, isfile
from os import listdir
import shutil
from tempfile import mkdtemp
import magic
import pandas as pd
from os.path import splitext
import chardet
from csv import Sniffer
dropzone_files = UploadSet('files') # allowed file types are defined in the config.
mod = Blueprint('site_dataprep', __name__, template_folder='templates')
class DatasetFieldsForm(FlaskForm):
selected = SelectMultipleField(u'Inputs and Targets')
class FilterForm(FlaskForm):
# column = SelectField(u'Column', ['id', 'text'])
field = SelectField('Field to filter')
headers = StringField( # TODO : make this extract just a single section
'Headers',
render_kw={"placeholder": "headers to extract"}
)
keywords = StringField(
'Filter on keyword',
render_kw={"placeholder": "extract if contains keyword"}
)
test_button = SubmitField()
submit_button = SubmitField()
class SubmitProcessed(FlaskForm):
dropdown_list = [('text', 'Text'), ('numeric', 'Numeric'),('mixed', 'Mixed')]
name = StringField(
'Dataset Name',
[Length(min=5, max=25)]
)
description = StringField(
'Dataset Description',
[Length(min=5, max=35)]
)
train = BooleanField(
'Train',
)
test = BooleanField(
'Test',
)
label = BooleanField(
'Label',
)
features_type = SelectField('Features type', choices=dropdown_list, default=1)
labels_type = SelectField('Labels type', choices=dropdown_list, default=1)
def _get_link(links, rel_value):
for l in links:
if l['rel'] == rel_value:
return l['href']
raise ValueError('No file relation found in the links list')
@mod.route('/dropzone', methods=['GET', 'POST'])
@login_required
def dropzone():
if request.method == 'POST':
session['dataset'] = None
file_items = request.files
values = file_items.values()
types = [v.content_type for v in values]
if not session['content_type']:
session['content_type'] = types[0]
if not session['extension']:
filename = next(iter(file_items.values())).filename
session['extension'] = splitext(filename)[1]
is_homogeneous = len(set(types)) == 1
if is_homogeneous:
for key in file_items:
file_path = join(session['tmp_dir'], file_items[key].filename)
file_items[key].save(file_path)
return "uploading..."
if 'tmp_dir' in session and session['tmp_dir']:
try:
shutil.rmtree(session['tmp_dir'])
except FileNotFoundError:
print('file didn\'t exist')
session['tmp_dir'] = mkdtemp()
session['content_type'] = None
session['extension'] = None
return render_template('dataprep/dropzone.html')
def texts_to_json(dir_path):
file_names = listdir(dir_path)
file_paths = [
join(dir_path, f)
for f
in file_names
if isfile(join(dir_path, f))
]
m = magic.Magic(mime=True)
dataset_json = {
'id': [],
'text': []
}
for file_name, file_path in zip(file_names, file_paths):
encoding = get_encoding(file_path)
print(
'current file {} is {}, encoding {}'.format(
file_path,
m.from_file(file_path),
encoding
)
)
with open(file_path) as f:
dataset_json['id'].append(file_name)
dataset_json['text'].append(f.read().encode(encoding).decode('utf-8'))
return dataset_json
def get_encoding(file_path):
with open(file_path, 'rb') as f:
encoding_info_dict = chardet.detect(f.read())
print(encoding_info_dict)
return encoding_info_dict['encoding']
@mod.route('/converter')
@login_required
def converter():
if not session['extension'] or not session['content_type']:
flash('Unsupported file type', 'info')
return redirect(url_for('.dropzone'))
session['processed'] = False
session['outputs'] = mkdtemp()
is_csv = (
session['extension'] == '.csv' or
session['content_type'].startswith('text/csv')
)
is_excel = session['extension'] in ['.xls', '.xlsx'] or any(
s
in session['content_type']
for s in ['spreadsheet', 'xls', 'xlsx', 'excel']
)
is_text = (
session['extension'] == '.txt' or
session['content_type'].startswith('text/')
)
if is_csv:
file_name = listdir(session['tmp_dir'])[0]
file_path = join(session['tmp_dir'], file_name)
# guess file encoding
encoding = get_encoding(file_path)
# guess separator
with open(file_path, encoding=encoding) as f:
sniffer = Sniffer()
line = f.readline().encode(encoding).decode('utf-8')
dialect = sniffer.sniff(line)
df = pd.read_csv(file_path, encoding=encoding, dialect=dialect)
session['fields'] = df.columns.tolist()
elif is_excel:
file_name = listdir(session['tmp_dir'])[0]
file_path = join(session['tmp_dir'], file_name)
df = | pd.read_excel(file_path, encoding='utf-8') | pandas.read_excel |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import tqdm
import mut.thermo
import mut.bayes
constants = mut.thermo.load_constants()
# Load the raw data
data = pd.read_csv('../../data/Chure2019_compiled_data.csv', comment='#')
# Segregate the data by classifier
DNA_data = data[data['class']=='DNA'].copy()
IND_data = data[data['class']=='IND'].copy()
# Load the Stan models.
DNA_model = mut.bayes.StanModel('../stan/Chure2019_DNA_binding_energy.stan',
force_compile=True)
KaKi_model = mut.bayes.StanModel('../stan/Chure2019_KaKi_only.stan',
force_compile=True)
KaKi_epAI_model = mut.bayes.StanModel('../stan/Chure2019_KaKi_epAI.stan',
force_compile=True)
empirical_F_model = mut.bayes.StanModel('../stan/Chure2019_empirical_F_inference.stan',
force_compile=True)
# ##############################################################################
# DNA BINDING ENERGY INFERENCE
# ##############################################################################
mutant_dfs = []
summary_dfs = []
print('Beginning inference of DNA binding energy...')
for g, d in tqdm.tqdm(DNA_data.groupby(['mutant', 'repressors'])):
# Assemble the data dictionary.
data_dict = {'J':1,
'N': len(d),
'idx': np.ones(len(d)).astype(int),
'R': d['repressors'],
'Nns': constants['Nns'],
'ep_ai': constants['ep_AI'],
'Ka': constants['Ka'],
'Ki': constants['Ki'],
'n_sites': constants['n_sites'],
'c': d['IPTGuM'],
'fc': d['fold_change']}
# Sample!
samples, samples_df = DNA_model.sample(data_dict=data_dict)
# Get the parameter names and rename
parnames = samples.unconstrained_param_names()
new_names = {'{}[{}]'.format(m.split('.')[0], m.split('.')[1]):'{}'.format(m.split('.')[0]) for m in parnames}
samples_df.rename(columns=new_names, inplace=True)
# Add identifiers
samples_df['repressors'] = g[1]
samples_df['mutant'] = g[0]
samples_df['operator'] = d['operator'].unique()[0]
# Compute the summarized dataframe
_df = samples_df[['ep_RA', 'sigma', 'lp__']].copy()
summary_df = mut.stats.compute_statistics(_df, logprob_name='lp__')
summary_df['repressors'] = g[1]
summary_df['mutant'] = g[0]
summary_df['operator'] = d['operator'].unique()[0]
# Add to storage vector
mutant_dfs.append(samples_df)
summary_dfs.append(summary_df)
# Combine and save to disk
mutant_df = pd.concat(mutant_dfs, sort=False)
summary_df = pd.concat(summary_dfs, sort=False)
mutant_df.to_csv('../../data/Chure2019_DNA_binding_energy_samples.csv', index=False)
summary_df.to_csv('../../data/Chure2019_DNA_binding_energy_summary.csv', index=False)
print('finished!')
# ##############################################################################
# KA AND KI INFERENCE
# ##############################################################################
mutant_dfs = []
summary_dfs = []
print('Beginning inference of Ka and Ki...')
for g, d in tqdm.tqdm(IND_data.groupby(['mutant', 'operator'])):
# Assemble the data dictionary.
data_dict = {'J':1,
'N': len(d),
'idx': np.ones(len(d)).astype(int),
'R': d['repressors'],
'Nns': constants['Nns'],
'ep_AI': constants['ep_AI'],
'ep_RA': constants[g[1]],
'n_sites': constants['n_sites'],
'c': d['IPTGuM'],
'fc': d['fold_change']}
# Sample!
samples, samples_df = KaKi_model.sample(data_dict=data_dict, iter=2000,
control=dict(adapt_delta=0.99))
# Get the parameter names and rename
parnames = samples.unconstrained_param_names()
new_names = {'{}[{}]'.format(m.split('.')[0], m.split('.')[1]):'{}'.format(m.split('.')[0]) for m in parnames}
new_names['Ka[1]'] = 'Ka'
new_names['Ki[1]'] = 'Ki'
samples_df.rename(columns=new_names, inplace=True)
# Add identifiers
samples_df['operator'] = g[1]
samples_df['repressors'] = d['repressors'].unique()[0]
samples_df['mutant'] = g[0]
# Compute the summarized dataframe
_df = samples_df[['Ka', 'Ki', 'sigma', 'lp__']].copy()
summary_df = mut.stats.compute_statistics(_df, logprob_name='lp__')
summary_df['repressors'] = d['repressors'].unique()[0]
summary_df['mutant'] = g[0]
summary_df['operator'] = g[1]
# Add to storage vector
mutant_dfs.append(samples_df)
summary_dfs.append(summary_df)
# Combine and save to disk
mutant_df = | pd.concat(mutant_dfs, sort=False) | pandas.concat |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from distutils.version import LooseVersion
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
from datetime import datetime
plt.rcParams['font.size'] = 6
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
graphs_path = root_path+'/boundary_effect/graph/'
if not os.path.exists(graphs_path):
os.makedirs(graphs_path)
time = pd.read_csv(root_path+'/time_series/MonthlyRunoffWeiRiver.csv')['Time']
time = time.values
time = [datetime.strptime(t,'%Y/%m') for t in time]
time = [t.strftime('%b %Y') for t in time]
# print(time)
# CHECK 1: is VMD shift-invariant?
# If yes, any shifted copy of an IMF from a VMD decomposition, similar to a
# shifted copy of the original time series, should be maintained.
# For example, given the sunspot time series x (of length 792) we can
# generate a 1-step advanced copy of the original time series as follows:
# x0=(1:791)
# x1=(2:792) this is a 1-step advanced version of x0
# Obviously, shift-invariancy is preserved between x0 and x1 since
# x0(2:791)=x1(1:790)
# For shift-invariancy to be preserved for VMD, we would observe, for
# example, that the VMD IMF1 components for x0 (imf1 of x0) and x1 (imf1 of
# x1) should be exact copies of one another, advanced by a single step.
# i.e., x0_imf(2:791,1) should equal x1_imf(1:790,1) if shift-invariancy
# is preserved.
# As is the case for VMD shown below, we can see that x0_imf(2:791,1) is basically
# equal to x1_imf(1:790,1) except for a few samples close to the beginning and
# end of x0 and x1. Interestingly, we see a low level of error close to the
# beginning of the time series and a high level of error close to the end of
# the time series, of high importance in operational forecasting tasks.
# The errors along the middle range are zeros indicating VMD is
# shift-invariant.
# We argue that the errors close to the boundaries are
# caused by the boundary effect, which is the exact problem this study was designed
# to solve.
# CHECK 2: The impact of appending data points to a time series and then
# performing VMD, analogous to the case in operational forecasting when new
# data becomes available and an updated forecast is made using the newly
# arrived data.
# Ideally, for forecasting situations, when new data is appended to a time
# series and some preprocessing is performed, it should not have an impact
# on previous measurements of the pre-processed time series.
# For example, if IMF1_1:N represents the IMF1, which has N total
# measurements and was derived by applying VMD to x_1:N, then we would expect
# that when we perform VMD after x is appended with another measurement,
# i.e., x_1:N+1, resulting in IMF1_1:N+1 that the first 1:N measurements in
# IMF1_1:N+1 are equal to IMF1_1:N. In other words,
# IMF1_1:N+1[1:N]=IMF1_1:N[1:N].
# We see that this is not the case. Appending an additional observation to the
# time series causes the updated VMD components to be entirely
# different from the original (not-yet-updated) VMD components.
# Interestingly, we see a high level of error at the boundaries of the time
# series, which is of high importance in operational forecasting tasks.
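# A minimal sketch (using the dataframes loaded just below, and assuming their first
# column holds IMF1) of how the shift-invariance error described above can be checked:
#   err = x0_imf.iloc[1:791, 0].values - x1_imf.iloc[0:790, 0].values
# np.abs(err) stays near zero over the middle of the record and grows towards both
# boundaries, most strongly at the end of the series.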
x0_imf = pd.read_csv(root_path+'/boundary_effect/vmd-decompositions-huaxian/x0_imf.csv')
x1_imf = pd.read_csv(root_path+'/boundary_effect/vmd-decompositions-huaxian/x1_imf.csv')
x_1_552_imf = | pd.read_csv(root_path+"/boundary_effect/vmd-decompositions-huaxian/x_1_552_imf.csv") | pandas.read_csv |
# Importing the Keras libraries and packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
from keras.models import Sequential
from keras.layers import Conv1D, Dropout
from keras.layers import MaxPooling1D
from keras.layers import Flatten
from keras.layers import Dense
# Importing the training set
dataset_train = pd.read_csv('traffic_data.csv')
training_set = dataset_train.iloc[:, 5:6].values
from sklearn.model_selection import train_test_split
X_train_split, X_test_split = train_test_split(training_set, shuffle=False, test_size = 0.2, random_state = 0)
X_train_split = pd.DataFrame(X_train_split)
X_test_split = | pd.DataFrame(X_test_split) | pandas.DataFrame |
"""The user interface for accessing the Bader calulation.
Contains the Bader class, dictionaries containing the attributes of the Bader
class along with their types and a config file converter.
"""
from ast import literal_eval
from configparser import ConfigParser
from inspect import getmembers, ismodule
from pickle import dump
from warnings import catch_warnings, simplefilter
import numpy as np
import pandas as pd
from pybader import __config__, io
from pybader.thread_handlers import (assign_to_atoms, bader_calc, dtype_calc,
refine, surface_distance)
from pybader.utils import atom_assign, charge_sum, vacuum_assign, volume_mask
# Dictionary containing the private attributes and types of the Bader class
private_attributes = {
'_density': np.ndarray,
'_lattice': np.ndarray,
'_atoms': np.ndarray,
'_file_info': dict,
'_bader_maxima': np.ndarray,
'_vacuum_charge': float,
'_vacuum_volume': float,
'_dataframe': pd.DataFrame
}
# Dictionary containing the configurable attributes and types of the Bader class
config_attributes = {
'method': str,
'refine_method': str,
'vacuum_tol': (type(None), float),
'refine_mode': (str, int),
'bader_volume_tol': (type(None), float),
'export_mode': (type(None), str, int),
'prefix': str,
'output': str,
'threads': int,
'fortran_format': int,
'speed_flag': bool,
'spin_flag': bool,
}
# Dictionary containing the accessible attributes and types of the Bader class
properties = {
'density': property,
'reference': property,
'bader_charge': np.ndarray,
'bader_volume': np.ndarray,
'bader_spin': np.ndarray,
'bader_volumes': np.ndarray,
'bader_atoms': np.ndarray,
'bader_distance': np.ndarray,
'atoms_charge': np.ndarray,
'atoms_volume': np.ndarray,
'atoms_spin': np.ndarray,
'atoms_volumes': np.ndarray,
'atoms_surface_distance': np.ndarray,
}
def python_config(config_file=__config__, key='DEFAULT'):
"""Converts a profile in the config.ini file to the correct python type.
args:
config_file: the location of the config file
key: the name of the profile to load
returns:
dictionary representation of the config with evaluated values
"""
config = ConfigParser()
with open(config_file, 'r') as f:
config.read_file(f)
if not key in config:
print(" No config for {key} found")
python_config = {}
for k in config[key]:
if k in config_attributes:
try:
python_config[k] = literal_eval(config[key].get(k))
except ValueError as e:
if config_attributes[k] is str:
python_config[k] = config[key].get(k)
else:
raise e
else:
raise AttributeError(f" Unknown keyword in config.ini: {k}")
if not isinstance(python_config[k], config_attributes[k]):
e = f" {k} has wrong type: {type(k)} != {config_attributes[k]}"
if hasattr(python_config[k], '__iter__'):
for t in python_config[k]:
if not isinstance(t, config_attributes[k]):
raise TypeError(e)
else:
raise TypeError(e)
return python_config
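# A minimal usage sketch (filename hypothetical; from_file is defined on the Bader
# class below):
#   conf = python_config(key='DEFAULT')
#   bader = Bader.from_file('CHGCAR', **conf)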
class Bader:
f"""Class for easily running Bader calculations.
Loads default config from '{__config__}'
args:
density_dict: dictionary with any or all of the keys 'charge', 'spin'
the value associated with these keys should be an ndarray
of the respective density in (rho * lattice volume) units
lattice: the lattice of the periodic cell with lattice vectors on the
first axis and in cartesian coordinates
atoms: the coordinates, in cartesian, of the atoms in the cell
file_info: dictionary of information about the file read in, including
filename, file_type, prefix (directory path), voxel_offset
and write function
other keyword arguments accepted are listed in the __slots__ attribute
"""
__slots__ = [
*private_attributes.keys(),
*config_attributes.keys(),
*properties.keys(),
]
def __init__(self, density_dict, lattice, atoms, file_info, **kwargs):
"""Initialise the class with default config and then apply any kwargs.
"""
self._density = density_dict
self._lattice = lattice
self._atoms = atoms
self._file_info = file_info
self._dataframe = None
self.density = self.charge if self.charge is not None else self.spin
self.reference = self.density
self.load_config()
self.apply_config(kwargs)
@classmethod
def from_file(cls, filename, file_type=None, **kwargs):
"""Class method for initialising from file
args:
filename: the location of the file
file_type: the type of the file (useful if filename doesn't contain
obvious type)
other keyword arguments are file-type specific and are listed in the
__args__ variable in the respective module
"""
if file_type is not None:
file_type = file_type.lower()
for f_type, f_method in getmembers(io, ismodule):
if f_type == file_type:
io_ = f_method
file_conf = {k: v for k, v in kwargs.items() if k in io_.__args__}
return cls(*io_.read(filename, **file_conf), **kwargs)
else:
io_packages = (p for p in getmembers(io, ismodule)
if p[1].__extensions__ is not None)
for package in io_packages:
for ext in package[1].__extensions__:
io_ = package[1] if ext in filename.lower() else None
if io_ is not None:
file_conf = {
k: v for k, v in kwargs.items()
if k in io_.__args__
}
return cls(*io_.read(filename, **file_conf), **kwargs)
print(" No clear file type found; file will be read as chgcar.")
file_conf = {k: v for k, v in kwargs.items() if k in io.vasp.__args__}
return cls(*io.vasp.read(filename, **file_conf), **kwargs)
@classmethod
def from_dict(cls, d):
"""Create class entirely from dictonary.
"""
atoms = d.pop('_atoms')
lattice = d.pop('_lattice')
density = d.pop('_density')
file_info = d.pop('_file_info')
cls(density, lattice, atoms, file_info, **d)
@property
def as_dict(self):
"""Convert class to dictionary.
"""
d = {}
for key in self.__slots__:
try:
d[key] = getattr(self, key)
except AttributeError:
pass
return d
@property
def info(self):
"""Access the file_info dictionary.
"""
return self._file_info
@property
def charge(self):
"""Access the charge density in the density dictionary.
"""
return self._density.get('charge', None)
@property
def spin(self):
"""Access the spin density in the density dictionary.
"""
return self._density.get('spin', None)
@property
def spin_bool(self):
"""Whether to perfom on the spin density also.
This should only return true if spin is not None.
"""
return self.spin_flag if self.spin is not None else False
@spin_bool.setter
def spin_bool(self, flag):
"""Set the spin flag.
"""
self.spin_flag = flag
@property
def lattice(self):
"""Access the lattice describing the periodic cell.
"""
return self._lattice
@property
def lattice_volume(self):
"""Calculate the volume of the lattice.
"""
v = np.dot(self.lattice[0], np.cross(*self.lattice[1:]))
return np.abs(v)
@property
def distance_matrix(self):
"""Calculate a matrix of distances relating to steps of size index.
The matrix is 3x3x3; however, index (2, 2, 2) is not a step of 2 in each
direction but rather a step of (-1, -1, -1).
"""
d = np.zeros((3, 3, 3, 3), dtype=np.float64)
d[1, :, :] += self.voxel_lattice[0]
d[2, :, :] -= self.voxel_lattice[0]
d[:, 1, :] += self.voxel_lattice[1]
d[:, 2, :] -= self.voxel_lattice[1]
d[:, :, 1] += self.voxel_lattice[2]
d[:, :, 2] -= self.voxel_lattice[2]
d = d**2
d = np.sum(d, axis=3)
d[d != 0] = d[d != 0]**-.5
return d
@property
def voxel_lattice(self):
"""A lattice desctibing the dimensions of a voxel.
"""
return np.divide(self.lattice, self.density.shape)
@property
def voxel_volume(self):
"""Calculate the volume of a single voxel.
"""
return self.lattice_volume / np.prod(self.density.shape)
@property
def voxel_offset(self):
"""The position of the charge described by the voxel to it's origin.
"""
return np.dot(self.voxel_offset_fractional, self.voxel_lattice)
@property
def voxel_offset_fractional(self):
"""The voxel offset in fractional coordinates w.r.t. it's own lattice.
"""
return self.info['voxel_offset']
@property
def T_grad(self):
"""The transform matrix for converting voxel step to gradient step.
"""
inv_l = np.linalg.inv(self.voxel_lattice)
return np.matmul(inv_l.T, inv_l)
@property
def atoms(self):
"""Access the atoms.
"""
return self._atoms
@atoms.setter
def atoms(self, array):
"""Set the atoms enforcing shape (len(atoms), 3).
"""
array = np.asarray(array).flatten()
array = array.reshape(array.shape[0] // 3, 3)
self._atoms = np.ascontiguousarray(array)
@property
def atoms_fractional(self):
"""Return the atoms in fractional coordinates.
"""
return np.dot(self.atoms, np.linalg.inv(self.lattice))
@property
def bader_maxima(self):
"""The location of the Bader maxima in cartesian coordinates.
"""
return np.dot(self.bader_maxima_fractional, self.lattice)
@bader_maxima.setter
def bader_maxima(self, maxima):
"""Set the location of the Bader maxima.
"""
maxima = np.add(maxima, self.voxel_offset_fractional)
maxima = np.divide(maxima, self.density.shape)
self._bader_maxima = np.ascontiguousarray(maxima)
@property
def bader_maxima_fractional(self):
"""Return the Bader maxima in fractional coordinates.
"""
try:
return self._bader_maxima
except AttributeError:
print(" ERROR: bader_maxima not yet set.")
return
@property
def vacuum_charge(self):
return getattr(self, '_vacuum_charge', 0.)
@vacuum_charge.setter
def vacuum_charge(self, value):
self._vacuum_charge = value
@property
def vacuum_volume(self):
return getattr(self, '_vacuum_volume', 0.)
@vacuum_volume.setter
def vacuum_volume(self, value):
self._vacuum_volume = value
@property
def dataframe(self):
if self._dataframe is None:
a = pd.Series(self.atoms_fractional[:, 0], name='a')
b = pd.Series(self.atoms_fractional[:, 1], name='b')
c = pd.Series(self.atoms_fractional[:, 2], name='c')
charge = pd.Series(self.atoms_charge, name='Charge')
volume = pd.Series(self.atoms_volume, name='Volume')
distance = | pd.Series(self.atoms_surface_distance, name='Distance') | pandas.Series |
import datetime
import re
import time
from decimal import Decimal
from functools import reduce
from typing import Iterable
import fitz
import pandas
import requests
from lxml import html
from requests.adapters import HTTPAdapter
from requests.cookies import cookiejar_from_dict
from bank_archive import Extractor, Downloader, StatementRow, MalformedError
REGEXP_WEBFORM = re.compile(
r"""WebForm_PostBackOptions\s*\(\s*["'](.*?)["'],\s*["'](.*?)["']"""
)
REGEXP_DISPOSITION_FILENAME = re.compile('filename="(.*?)"')
REGEXP_ACCOUNT_NUM = re.compile(r"N°([\s0-9a-z]+)")
class CaisseEpargneExtractor(Extractor):
COLUMNS = ["date", "description", "debit", "credit"]
@classmethod
def parse_date(cls, doc: fitz.Document, date: str):
st_year, st_month = map(
int,
re.search(r"RELEVES_.+?_([0-9]{4})([0-9]{2})[0-9]{2}", doc.name).groups(),
)
day, month = map(int, date.split("/", 1))
if st_month == 1 and month == 12:
# Fucking hell: dates in month 12 are for the previous year for the January statement.
st_year -= 1
return datetime.date(st_year, month, day)
@classmethod
def iter_starts(cls, doc: fitz.Document) -> Iterable[fitz.Rect]:
for page in doc:
for rects in cls.find_words_rect(page, "Date", "Détail", "Débit", "Crédit"):
# The account name is always slightly above the table head.
r = fitz.Rect().includeRect(rects[0]).includeRect(rects[-1])
r.y0 -= 22
r.y1 -= 12
# Margin for error as we want words to be fully inside the rect.
r.x0 -= 5
r.x1 += 5
account = " ".join(
w[4] for w in page.getText("words") if fitz.Rect(w[:4]) in r
)
account = REGEXP_ACCOUNT_NUM.search(account).group(1).strip()
yield page, account, rects
@classmethod
def iter_ends(cls, doc: fitz.Document) -> Iterable[fitz.Rect]:
for page in doc:
yield from ((page, True, r) for r in page.searchFor("NOUVEAU SOLDE"))
for pat in ("Perte ou vol", "Caisse d'Epargne et de Prévoyance"):
rect = next(iter(page.searchFor(pat)), None)
if rect:
rect.y0 -= 10
yield page, False, rect
break
@staticmethod
def _fix_start(start, end):
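# Stretch each column-header rectangle down to the end-of-table marker and pad the
# columns horizontally so that table words land fully inside a single column.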
(date, det, deb, cred) = start
bottom = end.tl.y
deb.x0 -= 20
deb.x1 += 5
cred.x0 -= 20
cred.x1 += 5
date.includePoint(det.bl)
date.x0 -= 3
date.x1 -= 5
det.includePoint(deb.bl)
det.x0 -= 3
det.x1 -= 1
date.y1 = bottom
det.y1 = bottom
deb.y1 = bottom
cred.y1 = bottom
return date, det, deb, cred
@classmethod
def columns_x(cls, start: fitz.Rect, end: fitz.Rect):
date, det, deb, cred = cls._fix_start(start, end)
return [date.tl.x, det.tl.x, deb.tl.x, cred.tl.x]
@classmethod
def search_area(cls, start: fitz.Rect, end: fitz.Rect):
rects = cls._fix_start(start, end)
merged = reduce(lambda a, b: a.includeRect(b), rects, fitz.Rect())
return merged
@classmethod
def fix_table(cls, table):
if table.shape[1] < len(cls.COLUMNS):
raise MalformedError("table does not have enough columns")
if table.shape[1] > len(cls.COLUMNS):
extra = table.iloc[:, 2:-2]
table.iloc[:, 1] = table.iloc[:, 1].str.cat(extra, sep="\n", na_rep="")
table.drop(extra, inplace=True, axis=1)
columns = {c: new_name for c, new_name in zip(table, cls.COLUMNS)}
table.rename(columns=columns, inplace=True)
return table
@classmethod
def extract_rows(cls, table):
results = []
current: StatementRow = None
def parse_value(v):
return Decimal(v.replace(",", ".").replace(" ", ""))
for _, (date, descr, debit, credit) in table.iterrows():
if pandas.isna(descr):
continue
if pandas.isna(date):
# Continuation.
if not current:
# Heading garbage.
continue
if not pandas.isna(debit):
continue
if not pandas.isna(credit):
continue
description = current.description + "\n" + descr
current = StatementRow(current.date, description, current.value)
else:
# Header itself.
if date.strip().lower() == "date":
continue
if current:
results.append(current)
if | pandas.isna(debit) | pandas.isna |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from mpeds.open_ended_coders import *
from pkg_resources import resource_filename
class MPEDS:
def __init__(self):
''' Constructor. '''
self.hay_clf = None
self.hay_vect = None
self.form_clf = None
self.form_vect = None
self.issue_clf = None
self.issue_vect = None
self.issue_reg = None
self.target_clf = None
self.target_vect = None
self.size_clf = None
self.location_clf = None
self.smo_clf = None
def getLede(self, text):
'''
Get the lede sentence for the text. Splits on <br/>
:param text: text(s) to extract the lede from
:type text: string or pandas series of strings
:return: ledes
:rtype: pandas series
'''
def _first_sentence(text):
sentences = text.split("<br/>")
return sentences[0]
if isinstance(text, basestring):
text = | pd.Series(text) | pandas.Series |
from pathlib import Path
from typing import Optional, List
from pandas import DataFrame, to_datetime, read_csv
from pandas._libs.tslibs.timestamps import Timestamp
from timeseries_generator.external_factors.external_factor import ExternalFactor
MIN_DATE = | Timestamp("01-01-1960") | pandas._libs.tslibs.timestamps.Timestamp |
import math
import sys
import pandas as pd
import plotly.express as px
import os
import json
if __name__ == '__main__':
rootpath = ""
while not os.path.isdir(rootpath):
rootpath = input("Enter root of discord data: ") + "/messages"
timezone = input("Enter time Zone, empty for UTC (this wont be checked): ") or "UTC"
combined = pd.Series([])
channels = {}
channellist = []
guilds = {}
guildlist = []
for root, dirs, files in os.walk(rootpath):
for filename in files:
if filename == "channel.json":
with open(os.path.join(root, filename), 'r', encoding='UTF-8') as channelfile:
channeldata = json.load(channelfile)
if "guild" in channeldata:
channellist.append(channeldata)
guilds[channeldata["guild"]["id"]] = channeldata["guild"]
selection = None
i = 0
for guildid in guilds:
print("%d: %s"%(i + 1, guilds[guildid]["name"]))
guildlist.append(guilds[guildid])
i += 1
while selection == None:
try:
selection = guildlist[int(input("Select Guild Nr.: ")) - 1]["id"]
except:
()
print("calculating...")
i = 0
for channel in channellist:
if channel["guild"]["id"] == selection:
with open(os.path.join(rootpath, channel["id"], "messages.csv"), newline='') as csvfile:
try:
data = | pd.read_csv(csvfile, parse_dates=[1]) | pandas.read_csv |
# coding: utf-8
"""Mapping of production and consumption mixes in Europe and their effect on
the carbon footprint of electric vehicles
This code performs the following:
- Import data from ENTSO-E (production quantities, trade relationships)
- Calculates the production and consumption electricity mixes for European countries
- Calculates the carbon footprint (CF) for the above electricity mixes
- Calculates the production, use-phase and end-of-life emissions for battery electric vehicles (BEVs) under
the following assumptions:
- Production in Korea (with electricity intensity 684 g CO2-eq/kWh)
- Use phase uses country-specific production and consumption mix
- End-of-life emissions static for all countries
Requires the following files for input:
- ENTSO_production_volumes.csv (from hybridized_impact_factors.py)
- final_emission_factors.csv (from hybridized_impact_factors.py)
- trades.csv (from hybridized_impact_factors.py)
- trade_ef_hv.csv (from hybridized_impact_factors.py)
- API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv (transmission losses, from OECD)
- car_specifications.xlsx
"""
import os
from datetime import datetime
import numpy as np
import pandas as pd
import country_converter as coco
import logging
#%% Main function
def run_calcs(run_id, year, no_ef_countries, export_data=True, include_TD_losses=True, BEV_lifetime=180000, ICEV_lifetime=180000, flowtrace_el=True, allocation=True, production_el_intensity=679, incl_ei=False, energy_sens=False):
"""Run all electricity mix and vehicle calculations and exports results."""
# Korean el-mix 679 g CO2/kWh, from ecoinvent
fp = os.path.curdir
production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C = load_prep_el_data(fp, year)
codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI = el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data) # Leontief electricity calculations
results_toSI, ICEV_total_impacts, ICEV_prodEOL_impacts, ICEV_op_int = BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation, energy_sens)
SI_fp = export_SI(run_id, results_toSI, production, trades, C, CFEL, no_ef_countries)
pickle_results(run_id, results_toSI, CFEL, ICEV_total_impacts, codecheck_file, export_data)
return results_toSI['BEV footprint'].xs('Consumption mix', level=1, axis=1), ICEV_prodEOL_impacts, ICEV_op_int, SI_fp
#%% Load and format data for calculations
def load_prep_el_data(fp, year):
"""Load electricity data and emissions factors."""
fp_output = os.path.join(fp, 'output')
# Output from bentso.py
filepath_production = os.path.join(fp_output, 'entsoe', 'ENTSO_production_volumes_'+ str(year) +'.csv')
filepath_intensities = os.path.join(fp_output, 'final_emission_factors_'+ str(year) +'.csv')
filepath_trades = os.path.join(fp_output, 'entsoe', 'trades_'+ str(year) +'.csv')
filepath_tradeonly_ef = os.path.join(fp_output, 'ecoinvent_ef_hv.csv')
# read in production mixes (annual average)
production = pd.read_csv(filepath_production, index_col=0)
production.rename_axis(index='', inplace=True)
# matrix of total imports/exports of electricity between regions; aka Z matrix
trades = pd.read_csv(filepath_trades, index_col=0)
trades.fillna(0, inplace=True) # replace np.nan with 0 for matrix math, below
# manually remove Cyprus for now
production.drop(index='CY', inplace=True)
trades = trades.drop(columns='CY').drop(index='CY')
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
""" Make into sum of production and production + import - export"""
country_total_prod_disagg = production.sum(axis=1)
country_total_cons_disagg = country_total_prod_disagg + imports - exports
waste = (production['Waste'] / production.sum(axis=1))
waste_min = waste[waste > 0].min()
waste_max = waste.max()
g_raw = production.sum(axis=1) # Vector of total electricity production (regionalized)
""" Read power plant CO2 intensities [tech averages] """
# average technology CO2 intensities (i.e., non-regionalized)
all_C = pd.read_csv(filepath_intensities, index_col=0)
all_C.drop(index='CY', inplace=True)
# use ecoinvent factors for these countries as a proxy to calculate consumption mixes for receiving countries
trade_ef = pd.read_csv(filepath_tradeonly_ef, index_col=[0, 1, 2, 3], header=[0])
trade_ef.index = trade_ef.index.droplevel([0, 1, 3]) # remove DSID, activityName and productName (leaving geography)
trade_ef.index.rename('geo', inplace=True)
trade_ef.columns = ['emission factor']
# Generate regionalized tech generation matrix
C = all_C.T
C.sort_index(axis=1, inplace=True)
C.sort_index(axis=0, inplace=True)
return production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C
#%% el_calcs
def el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data):
fp_data = os.path.join(fp, 'data')
# Make list of full-country resolution
original_countries = list(production.index)
# Make list of aggregated countries (affects Nordic countries + GB (UK+NI))
# read 3-letter ISO codes
countries = list(trades.index)
""" Calculates national production mixes and consumption mixes using Leontief assumption """
# Start electricity calculations (ELFP.m)
# Calculate production and consumption mixes
# Carbon intensity of production mix
CFPI_no_TD = pd.DataFrame(production.multiply(C.T).sum(axis=1) / production.sum(axis=1), columns=['Production mix intensity']) # production mix intensity without losses
CFPI_no_TD.fillna(0, inplace=True)
# List of countries that have trade relationships, but no production data
trade_only = list(set(trades.index) - set(production.loc[production.sum(axis=1) > 0].index))
# Add ecoinvent proxy emission factors for trade-only countries
logging.info('Replacing missing production mix intensities with values from ecoinvent:')
for country in trade_only:
if CFPI_no_TD.loc[country, 'Production mix intensity'] == 0:
logging.info(country)
CFPI_no_TD.loc[country] = trade_ef.loc[country].values
i = country_total_cons_disagg.size # Number of European regions
g = g_raw
g = g.sort_index() # total generation vector (local production for each country)
total_imported = trades.sum(axis=0) # sum rows for total imports
total_exported = trades.sum(axis=1) # sum columns for total exports
y = total_imported + g - total_exported # total final demand (consumption) of electricity
    q = g + total_imported # vector of total supply (generation + imports)
q.replace(np.nan, 0, inplace=True)
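    # Flow-tracing setup (Leontief input-output form): the trade coefficient matrix
    # Atmx = trades @ pinv(diag(q)) and the generation matrix Agen = diag(g) @ pinv(diag(q))
    # are combined through the Leontief inverse (I - Atmx)^-1 so that imported electricity
    # is traced back to the countries that actually generated it.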
if flowtrace_el:
# For flow tracing approach: make Leontief production functions (normalize columns of A)
# normalized trade matrix quadrant
Atmx = pd.DataFrame(np.matmul(trades, np.linalg.pinv(np.diag(q))))
# normalized production matrix quadrant
Agen = pd.DataFrame(np.diag(g) * np.linalg.pinv(np.diag(q)), index=countries, columns=countries) # coefficient matrix, generation
# "Trade" Leontief inverse
# Total imports from region i to j per unit demand on j
Ltmx = pd.DataFrame(np.linalg.pinv(np.identity(i) - Atmx), trades.columns, trades.index)
# Production in country i for trade to country j
# Total generation in i (rows) per unit demand j
Lgen = pd.DataFrame(np.matmul(Agen, Ltmx), index=Agen.index, columns=Ltmx.columns)
y_diag = pd.DataFrame(np.diag(y), index=countries, columns=countries)
# total imports for given demand
Xtmx = pd.DataFrame(np.matmul(np.linalg.pinv(np.identity(i) - Atmx), y_diag))
# Total generation to satisfy demand (consumption)
Xgen = np.matmul(np.matmul(Agen, Ltmx), y_diag)
Xgen.sum(axis=0)
Xgen_df = pd.DataFrame(Xgen, index=Agen.index, columns=y_diag.columns)
# ### Check electricity generated matches demand
totgen = Xgen.sum(axis=0)
r_gendem = totgen / y # All countries should be 1
        #%% Generation technology matrix
# TC is a country-by-generation technology matrix - normalized to share of total domestic generation, i.e., normalized generation/production mix
# technology generation, kWh/ kWh domestic generated electricity
TC = pd.DataFrame(np.matmul(np.linalg.pinv(np.diag(g)), production), index=g.index, columns=production.columns)
TCsum = TC.sum(axis=1) # Quality assurance - each country should sum to 1
# Calculate technology generation mix in GWh based on production in each region
TGP = pd.DataFrame(np.matmul(TC.transpose(), np.diag(g)), index=TC.columns, columns=g.index) #.== production
# Carbon intensity of consumption mix
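        # Consumption mix intensity: production mix intensities weighted by Lgen, i.e. by the
        # share of each country's consumption that is ultimately generated in each other country.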
CFCI_no_TD = pd.DataFrame(np.matmul(CFPI_no_TD.T.values, Lgen), columns=CFPI_no_TD.index).T
else:
# Use grid-average assumption for trade
prod_emiss = production.multiply(C.T).sum(axis=1)
trade_emiss = (pd.DataFrame(np.diag(CFPI_no_TD.iloc(axis=1)[0]), index=CFPI_no_TD.index, columns=CFPI_no_TD.index)).dot(trades)
CFCI_no_TD = pd.DataFrame((prod_emiss + trade_emiss.sum(axis=0) - trade_emiss.sum(axis=1)) / y)
CFCI_no_TD.columns = ['Consumption mix intensity']
# use ecoinvent for missing countries
if incl_ei:
CFCI_no_TD.update(trade_ef.rename(columns={'emission factor':'Consumption mix intensity'}))
#%% Calculate losses
# Transpose added after removing country aggregation as data pre-treatment
if include_TD_losses:
# Calculate technology characterization factors including transmission and distribution losses
# First, read transmission and distribution losses, downloaded from World Bank economic indicators (most recent values from 2014)
if isinstance(include_TD_losses, float):
TD_losses = include_TD_losses # apply constant transmission and distribution losses to all countries
elif isinstance(include_TD_losses, bool):
losses_fp = os.path.join(fp_data, 'API_EG.ELC.LOSS.ZS_DS2_en_csv_v2_673578.csv')
try:
TD_losses = pd.read_csv(losses_fp, skiprows=[0,1,2,3], usecols=[1, 58], index_col=0)
TD_losses = TD_losses.iloc[:, -7:].dropna(how='all', axis=1)
TD_losses = TD_losses.apply(lambda x: x / 100 + 1) # convert losses to a multiplicative factor
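                # e.g. a reported 6% grid loss becomes a factor of 1.06, scaling up the
                # per-kWh intensity to account for electricity lost before final use.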
# ## Calculate total national carbon emissions from el - production and consumption mixes
TD_losses.index = coco.convert(names=TD_losses.index.tolist(), to='ISO2', not_found=None)
TD_losses = TD_losses.loc[countries]
TD_losses = pd.Series(TD_losses.iloc[:, 0])
            except Exception:
print("Warning! Transmission and distribution losses input files not found!")
TD_losses = pd.Series(np.zeros(len(production.index)), index=production.index)
else:
print('invalid entry for losses')
        # Calculate carbon intensity of production and consumption mixes including losses
CFPI_TD_losses = CFPI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0) # apply transmission and distribution losses to production mix intensity
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0).dropna(how='any', axis=0)
if len(CFCI_TD_losses) < len(CFPI_TD_losses):
CFCI_TD_losses = CFCI_no_TD.multiply(TD_losses, axis=0)
CFPI = CFPI_TD_losses
CFCI = CFCI_TD_losses
else:
CFPI = CFPI_no_TD
CFCI = CFCI_no_TD
elmixes = (CFPI.copy()).join(CFCI.copy()).T
#%%
# Aggregate multi-nodes to single countries using weighted average of production/consumption as appropriate
    country_total_prod_disagg.name = "Total production (TWh)"
country_total_prod_disagg.index = original_countries
    country_total_cons_disagg.name = "Total consumption (TWh)"
country_total_cons_disagg.index = original_countries
country_el = pd.concat([country_total_prod_disagg, country_total_cons_disagg], axis=1)
country_el.columns = ['Total production (TWh)', 'Total consumption (TWh)']
CFEL_mixes = elmixes.T
CFEL = pd.concat([country_el, CFEL_mixes], axis=1)
imports = trades.sum(axis=0)
exports = trades.sum(axis=1)
CFEL['Trade percentage, gross'] = (imports + exports) / CFEL['Total production (TWh)']
CFEL['Import percentage'] = imports / CFEL['Total production (TWh)']
CFEL['Export percentage'] = exports / CFEL['Total production (TWh)']
CFEL['imports'] = imports
CFEL['exports'] = exports
#Calculate total carbon footprint intensity ratio production vs consumption
rCP = CFCI['Consumption mix intensity'].divide(CFPI['Production mix intensity'])
    rCP.name = "ratio consumption:production mix"
# Export intermediate variables from calculations for troubleshooting
if export_data:
keeper = run_id + "{:%d-%m-%y, %H_%M}".format(datetime.now())
fp_results = os.path.join(fp, 'results')
codecheck_file = os.path.join(os.path.abspath(fp_results), 'code_check_' + keeper + '.xlsx')
writer = pd.ExcelWriter(codecheck_file)
g.to_excel(writer, "g")
q.to_excel(writer, "q")
y.to_excel(writer, 'y')
if flowtrace_el:
Atmx.to_excel(writer, "Atmx")
Agen.to_excel(writer, "Agen")
Ltmx.to_excel(writer, "LTmx")
Lgen.to_excel(writer, "Lgen")
Xtmx.to_excel(writer, "Xtmx")
TGP.to_excel(writer, "TGP")
CFPI.T.to_excel(writer, "CFPI")
CFCI.T.to_excel(writer, "CFCI")
rCP.to_excel(writer, "rCP")
C.T.to_excel(writer, "C")
writer.save()
return codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI
#%%
def BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation=True, energy_sens=False):
"""Calculate BEV lifecycle emissions."""
# First, setup calculations
# read in data
fp_data = os.path.join(fp, 'data')
vehicle_fp = os.path.join(fp_data, 'car_specifications.xlsx')
cars = | pd.read_excel(vehicle_fp, sheet_name='veh_emiss', index_col=[0, 1, 2], usecols='A:G') | pandas.read_excel |
# Feb 2019
# <NAME>, <NAME>, <NAME>, <NAME>
#
# This script implements the summary() function of the KMediansPy package.
import numpy as np
import pandas as pd
from KMediansPy.distance import distance
def summary(x:np.ndarray, medians:np.ndarray, labels:np.ndarray) -> pd.DataFrame:
"""
Generates a table to display the cluster labels, the x and y coordinates of the cluster medians,
number of points in each cluster, the average distance within the cluster,
the maximum distance within the cluster and the minimum distance within the cluster.
Parameters
----------
x: 2D array of order mx2
dataset, must only be 2D (x & y coordinates)
medians: 2D array
X and y coordinates of each cluster median
labels: 1D array
Array with the assignment of the cluster for each set of point in the dataset
Returns
-------
summary dataframe
Returns a dataframe with 7 columns and the number of rows will match the number of clusters. The labels of the columns:
Cluster labels, X Coordinates of Final Medians, Y Coordinates of Final Medians, Number of Points in a Cluster, Average Distance within Cluster,
Minimum Distance within Cluster, Maximum Distance within Cluster
"""
# raises typeerror for each inputs
if not isinstance(x, np.ndarray):
raise TypeError("x is not an array")
if not isinstance(medians, np.ndarray):
raise TypeError("medians is not an array")
if not isinstance(labels, np.ndarray):
raise TypeError("labels is not an array")
# raises index error if the inputs are empty
if x.shape[0] == 0:
raise IndexError("x is empty")
if medians.shape[0] == 0:
raise IndexError("There are no medians coordinates")
if labels.shape[0] == 0:
raise IndexError("There are no labels for your clusters")
# raises index error if there is the dimensions are not 2 for x and medians
if x.ndim > 2:
raise IndexError("x has too many dimensions")
if x.ndim == 1:
raise IndexError("x needs second dimension")
if medians.ndim > 2:
raise IndexError("Medians has too many dimensions")
if medians.ndim == 1:
raise IndexError("Medians needs second dimension")
# raises index error if there is not an Y coordinate for x and medians
if x.shape[1] != 2:
raise IndexError("x is missing Y coordinate")
if medians.shape[1] != 2:
raise IndexError("medians is missing Y coordinate")
# cluster labels for table
cluster_labels = np.unique(labels)
#x & y coordinates for table
xcoor_medians = []
ycoor_medians = []
for i in medians:
xcoor_medians.append(i[0])
ycoor_medians.append(i[1])
# create dict to get label with index of position
dict_label_index = {}
for i in cluster_labels:
dict_label_index[i] = [index for index, value in enumerate(labels) if value == i]
    # initialize empty lists
number_points = []
avg_distance = []
max_distance = []
min_distance = []
for i in cluster_labels:
# loop into the cluster to get number of points, max, min, and average (of a single cluster)
new_list = []
# loop to get the index values for the cluster
for j in dict_label_index[i]:
new_list.append(x[j])
myarray = np.array(new_list)
med_dat =np.array([medians[i]])
# calculate distance between median and points in cluster
dist_test = distance(myarray, med_dat)
# calculate average, max and min
avg_distance.append(np.mean(dist_test))
max_distance.append(np.max(dist_test))
min_distance.append(np.min(dist_test))
# count the number of points in each cluster
number_points.append(len(dict_label_index[i]))
# generate the dataframe to print to screen
df_data = {"Cluster Label":cluster_labels,
"X Coordinates of Final Medians":xcoor_medians,
"Y Coordinates of Final Medians":ycoor_medians,
"Number of Points in a Cluster":number_points,
"Average Distance within Cluster":avg_distance,
"Maxiumum Distance within Cluster":max_distance,
"Minimum Distance within Cluster":min_distance}
output_df = | pd.DataFrame(data=df_data) | pandas.DataFrame |
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from collections import defaultdict
import random as r
import math as m
import numpy as np
from keras import backend as K
from random import Random
import pandas as pd
from keras.preprocessing import sequence
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Embedding, LSTM, Bidirectional, Concatenate
from keras.layers import Input, Lambda
from keras.optimizers import Adam
from keras.optimizers import RMSprop
from sklearn.model_selection import train_test_split
from keras.layers.merge import concatenate
import sys, getopt
import os
MAXLEN=50
SEED=314159
LSTM_DIM=32
DROPOUT=0.2
EPOCHS=2
BATCH_SIZE=1000
class RCTArm:
def __init__(self, line, index):
l = line.strip().split("\t")
self.id = index
self.text = l[0]
self.ov = float(l[1])
def __str__(self):
return 'RCT:({}) OV: {}'.format(self.text, self.ov)
class RCTArms:
def __init__(self, train_file):
self.rcts = []
line_number = 0
for line in open(train_file):
rct_arm = RCTArm(line, line_number)
self.rcts.append(rct_arm)
line_number += 1
def convertWordsToIds(self, maxlen=MAXLEN):
self.maxlen = maxlen
all_text = []
for rct in self.rcts:
all_text.append(rct.text)
self.keras_tokenizer = Tokenizer(num_words=None, filters=[], lower=False, split=' ')
self.keras_tokenizer.fit_on_texts(all_text)
self.vsize = len(self.keras_tokenizer.word_index) + 1
self.x = self.keras_tokenizer.texts_to_sequences(all_text)
self.x = pad_sequences(self.x, padding='post', maxlen=maxlen)
def create_embedding_matrix(self, embfile):
in_dict = 0
with open(embfile) as f:
line_number=0
for line in f:
if line_number==0:
tokens = line.strip().split(" ")
self.embedding_dim = int(tokens[1])
self.embedding_matrix = np.zeros((self.vsize, self.embedding_dim))
else:
word, *vector = line.split()
if word in self.keras_tokenizer.word_index:
idx = self.keras_tokenizer.word_index[word]
in_dict += 1
self.embedding_matrix[idx] = np.array(
vector, dtype=np.float32)[:self.embedding_dim]
line_number += 1
return in_dict
def getData(self, index):
return self.x[index]
def create_pairs(self):
        '''Create all index-ordered pairs of arms.
        The label is 1 when the first arm's outcome value (ov) is >= the second's, else 0.
        '''
pairs = []
labels = []
N = len(self.rcts)
for i in range (N-1):
for j in range(i+1,N):
pairs.append([self.x[i], self.x[j]])
label = 0
if self.rcts[i].ov >= self.rcts[j].ov:
label = 1
labels.append(label)
return np.array(pairs), np.array(labels)
# Cartesian product between two sets
def create_pairs_from_ids(self, listA, listB):
pairs = []
labels = []
seen_pairs = {}
self_pair_count = 0
seen_pair_count = 0
for a in listA:
for b in listB:
'''
Avoid self-pairs; also ignore the order
'''
if a==b:
self_pair_count+=1
continue
key = str(a) + ' ' + str(b)
revkey = str(b) + ' ' + str(a)
if not key in seen_pairs and not revkey in seen_pairs:
seen_pairs[key] = True
seen_pairs[revkey] = True
else:
seen_pair_count+=1
continue # this pair has already been added
pairs.append([self.x[a], self.x[b]])
label = 0
if self.rcts[a].ov >= self.rcts[b].ov:
label = 1
labels.append(label)
print ('Ignored {} (self) and {} (seen) duplicate pairs'.format(self_pair_count, seen_pair_count))
return pairs, labels
def create_split_aware_pairs(self, train_ratio=0.9):
#First split the rcts into train and test
r = Random(SEED)
r.shuffle(self.rcts)
ids = list(map(lambda r: r.id, self.rcts)) # list of shuffled ids
ntrain = int(len(ids)*train_ratio)
train_ids = ids[0:ntrain]
test_ids = ids[ntrain:]
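        # Split-aware pairing avoids leakage: training pairs are formed only among training
        # arms, while evaluation uses test-test pairs plus test-train cross pairs (built below).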
#collect all pairs from the train ids
train_pairs, train_labels = self.create_pairs_from_ids(train_ids, train_ids)
#collect all pairs from the test ids - complete subgraph
self_test_pairs, self_test_labels = self.create_pairs_from_ids(test_ids, test_ids)
# additionally build the cross-pairs, (test, train) pairs
cross_test_pairs, cross_test_labels = self.create_pairs_from_ids(test_ids, train_ids)
test_pairs = self_test_pairs + cross_test_pairs
test_labels = self_test_labels + cross_test_labels
return np.array(train_pairs), np.array(train_labels), np.array(test_pairs), np.array(test_labels)
def complete_model(rcts):
input_a = Input(shape=(rcts.maxlen, ))
print (input_a.shape)
emb_a = Embedding(rcts.embedding_matrix.shape[0],
rcts.embedding_matrix.shape[1],
weights=[rcts.embedding_matrix])(input_a)
print (emb_a.shape)
input_b = Input(shape=(rcts.maxlen, ))
print (input_b.shape)
emb_b = Embedding(input_dim=rcts.embedding_matrix.shape[0],
output_dim=rcts.embedding_matrix.shape[1],
weights=[rcts.embedding_matrix])(input_b)
print (emb_b.shape)
shared_lstm = LSTM(LSTM_DIM)
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = shared_lstm(emb_a)
processed_a = Dropout(DROPOUT)(processed_a)
processed_b = shared_lstm(emb_b)
processed_b = Dropout(DROPOUT)(processed_b)
merged_vector = concatenate([processed_a, processed_b], axis=-1)
# And add a logistic regression (2 class - sigmoid) on top
# used for backpropagating from the (pred, true) labels
predictions = Dense(1, activation='sigmoid')(merged_vector)
model = Model([input_a, input_b], outputs=predictions)
return model
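# Hedged usage sketch (an assumption for illustration, not necessarily the exact training
# loop used in main() below): the siamese network is typically compiled with a binary
# cross-entropy loss and fit on the padded sequence pairs, e.g.
#   model = complete_model(rcts)
#   model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=['accuracy'])
#   model.fit([x_train[:, 0], x_train[:, 1]], y_train, batch_size=BATCH_SIZE, epochs=EPOCHS)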
def main(argv):
DATA_FILE = None
EMB_FILE = None
try:
opts, args = getopt.getopt(argv,"h:d:n:", ["datafile=", "nodevecs="])
for opt, arg in opts:
if opt == '-h':
print ('eval_cvfold.py -d/--datafile= <datafile> -n/--nodevecs= <nodevecs>')
sys.exit()
elif opt in ("-d", "--datafile"):
DATA_FILE = arg
elif opt in ("-n", "--nodevecs"):
EMB_FILE = arg
except getopt.GetoptError:
print ('usage: eval_cvfold.py -d <datafile> -n <nodevecs>')
sys.exit()
if DATA_FILE == None or EMB_FILE == None:
        print ('usage: eval_cvfold.py -d <datafile> -n <nodevecs>')
sys.exit()
print ("Data file: %s" % (DATA_FILE))
print ("Emb file: %s" % (EMB_FILE))
rcts = RCTArms(DATA_FILE)
rcts.convertWordsToIds()
# Load embeddings from dictionary
nwords_in_dict = rcts.create_embedding_matrix(EMB_FILE)
# Print Vocab overlap
#nonzero_elements = np.count_nonzero(np.count_nonzero(rcts.embedding_matrix, axis=1))
#print(nonzero_elements / rcts.vsize)
print ('#words in vocab: {}'.format(nwords_in_dict))
x_train, y_train, x_test, y_test = rcts.create_split_aware_pairs()
print ("#Train pairs: {}".format(x_train.shape[0]))
print ("#Test pairs: {}".format(x_test.shape[0]))
freqs = | pd.Series(y_train) | pandas.Series |
#!/usr/bin/env python3
# python3.6
# ref link: https://www.jianshu.com/p/91c98585b79b
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
import argparse
def DE(fi, wt, ko):
prefix = fi.split('.')[0]
data = pd.read_table(fi, header=0, index_col=0)
data = np.log2(data+1)
# Boxplot of the expression data
color = {'boxes': 'DarkGreen', 'whiskers': 'DarkOrange', 'medians': 'DarkBlue', 'caps': 'Gray'}
data.plot(kind='box', color=color, sym='r.', title=prefix)
plt.xticks(rotation=40)
plt.xlabel('Samples', fontsize=15)
plt.ylabel('log2(CPM+1)', fontsize=15)
out_box = prefix + "_boxplot.pdf"
plt.savefig(out_box, bbox_inches='tight')
plt.close()
# Density plot of the expression data
data.plot(kind='density', title=prefix)
#data.plot.kde()
out_density = prefix + "_density.pdf"
plt.savefig(out_density)
plt.close()
#####
wt = wt.split(':')
wt1 = int(wt[0])
wt2 = int(wt[1])
ko = ko.split(':')
ko1 = int(ko[0])
ko2 = int(ko[1])
# The mean expression of wt samples for each genes
wt = data.iloc[:, wt1:wt2].mean(axis=1)
# The mean expression of ko samples for each genes
ko = data.iloc[:, ko1:ko2].mean(axis=1)
# FoldChange, ko vs wt
foldchange = ko - wt
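    # Expression values are already log2(CPM+1), so the difference of group means is the
    # log2 fold change of KO relative to WT.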
# P value
pvalue = []
gene_number = len(data.index)
for i in range(0, gene_number):
ttest = stats.ttest_ind(data.iloc[i,wt1:wt2], data.iloc[i, ko1:ko2])
pvalue.append(ttest[1])
    ### volcano plot
pvalue_arr = np.asarray(pvalue)
result = | pd.DataFrame({'pvalue': pvalue_arr, 'FoldChange': foldchange}) | pandas.DataFrame |
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
import os
import csv
import gc
from sklearn.metrics import mean_squared_error
import math
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn import linear_model
from xgboost.sklearn import XGBRegressor
import copy
import pyflux as pf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
PRICED_BITCOIN_FILE_PATH = "C:/Users/wang.yuhao/Documents/CoinWorks-master/data/pricedBitcoin2009-2018.csv"
DAILY_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Downloads/CoinWorks-master/CoinWorks-master/data/dailyOccmatrices/"
ROW = -1
COLUMN = -1
TEST_SPLIT = 0.01
ALL_YEAR_INPUT_ALLOWED = False
YEAR = 2017
# Baseline
from sklearn import metrics
import matplotlib.pyplot as plt
def exclude_days(train, test):
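    # The last column of each sample holds the day identifier; split it off so it can be
    # reported with predictions but never enters the model as a feature.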
row, column = train.shape
train_days = np.asarray(train[:, -1]).reshape(-1, 1)
x_train = train[:, 0:column - 1]
test_days = np.asarray(test[:, -1]).reshape(-1, 1)
x_test = test[:, 0:column - 1]
return x_train, x_test, train_days, test_days
def merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed):
if(aggregation_of_previous_days_allowed):
if(occurrence_data.size==0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.add(occurrence_data, daily_occurrence_normalized_matrix)
else:
if(occurrence_data.size == 0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.concatenate((occurrence_data, daily_occurrence_normalized_matrix), axis=1)
#print("merge_data shape: {} occurrence_data: {} ".format(occurrence_data.shape, occurrence_data))
return occurrence_data
def get_normalized_matrix_from_file(day, year, totaltx):
daily_occurrence_matrix_path_name = DAILY_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + '.csv'
daily_occurence_matrix = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
#print("daily_occurence_matrix.size: ", daily_occurence_matrix.size, daily_occurence_matrix.shape)
#print("np.asarray(daily_occurence_matrix): ",np.asarray(daily_occurence_matrix))
#print("np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size): ",np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size), np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size).shape, np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size).size)
#print("totaltx: ",totaltx)
#print("np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx: ",np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx)
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
def get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
#print("occurrence_data: ", occurrence_data)
if(is_price_of_previous_days_allowed):
#print("previous_price_data: ", np.asarray(previous_price_data).reshape(1, -1), np.asarray(previous_price_data).reshape(1, -1).shape)
occurrence_data = np.asarray(previous_price_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
#print("current_row: ", current_row, current_row.shape)
#print(" price occurrence_input: ", np.asarray(current_row['price']).reshape(1,1), (np.asarray(current_row['price']).reshape(1,1)).shape)
#print("concatenate with price occurrence_input: ", occurrence_input, occurrence_input.shape)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print(" price occurrence_input: ", np.asarray(current_row['day']).reshape(1,1), (np.asarray(current_row['day']).reshape(1,1)).shape)
#print("concatenate with day occurrence_input: ", occurrence_input, occurrence_input.shape)
return occurrence_input
def rf_base_rmse_mode(train_input, train_target, test_input, test_target):
rf_regression = RandomForestRegressor(max_depth=2, random_state=0)
rf_regression.fit(train_input, train_target.ravel() )
predicted = rf_regression.predict(test_input)
rf_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
return rf_base_rmse
def gp_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'kernel': RationalQuadratic(alpha=0.0001, length_scale=1),
'n_restarts_optimizer': 2
}
    adj_params = {'kernel': [RationalQuadratic(alpha=a, length_scale=1)
                             for a in (0.0001, 0.001, 0.01, 0.1, 1, 10)],
                  'n_restarts_optimizer': [2]}
gpr = GaussianProcessRegressor(**param)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(gpr, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input,train_target)
#print("cv_results_:",cscv.cv_results_)
#print("best_params_: ",cscv.best_params_)
gpr = GaussianProcessRegressor(**cscv.best_params_)
gpr.fit(train_input, train_target)
mu, cov = gpr.predict(test_input, return_cov=True)
test_y = mu.ravel()
#uncertainty = 1.96 * np.sqrt(np.diag(cov))
gp_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, test_y))
print(gp_base_rmse)
return gp_base_rmse
def enet_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'alpha': 0.0001,
'l1_ratio': 0.001,
}
elastic = linear_model.ElasticNet(**param)
adj_params = {'alpha': [0.0001, 0.001, 0.01, 0.1, 1, 10],
'l1_ratio': [0.001, 0.005 ,0.01, 0.05, 0.1, 0.5, 1]}
#'max_iter': [100000]}
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(elastic, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input, train_target)
elastic= linear_model.ElasticNet(**cscv.best_params_)
elastic.fit(train_input,train_target.ravel())
predicted = elastic.predict(test_input)
enet_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
print("enet_base_rmse: ", enet_base_rmse)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(test_target, predicted)))
return enet_base_rmse
def xgbt_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'n_estimators':10,
'learning_rate': 0.01,
}
adj_params = {
'n_estimators':[10,50,100,200,300,400,500,1000],
'learning_rate': [0.01, 0.1, 1]
}
xgbt = XGBRegressor(**param)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(xgbt, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input, train_target)
xgbt= XGBRegressor(**cscv.best_params_)
xgbt.fit(train_input,train_target.ravel())
predicted = xgbt.predict(test_input)
xgbt_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
print("xgbt_base_rmse: ", xgbt_base_rmse)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(test_target, predicted)))
return xgbt_base_rmse
def arimax_initialize_setting(window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
data = preprocess_data(window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
train = data[0:100, :]
test = data[100:100+prediction_horizon, :]
x_train, x_test, train_days, test_days = exclude_days(train, test)
row, column = x_train.shape
train_target = np.asarray(x_train[:, -1]).reshape(-1)
train_input = x_train[:, 0:column - 1]
test_target = x_test[: , -1]
test_input = x_test[ : , 0:column - 1]
return train_input, train_target, test_input, test_target, train_days, test_days
def arimax_base_rmse_mode(train_input, train_target, test_input, test_target):
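    # Approach: first-difference the windowed price/totaltx features and the target to get
    # roughly stationary series, fit ARIMAX(2,0,2) with totaltx_5 as the exogenous regressor,
    # forecast the differenced price, then undo the differencing by adding back the last
    # observed training price before scoring the prediction.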
train_input_diff_arr = np.array([])
train_columns_name = []
train_input_column = int(train_input.shape[1])
for i in range(train_input_column):
if(i%2==0):
train_columns_name.append('price_' + str(i))
else:
train_columns_name.append('totaltx_' + str(i))
train_input_diff = np.diff(train_input[:,i] )
if i == 0:
train_input_diff_arr = train_input_diff
else:
train_input_diff_arr = np.dstack((train_input_diff_arr, train_input_diff))
columns_name = copy.deepcopy(train_columns_name)
columns_name.append('current_price')
train_target_diff = np.diff(train_target )
train_input_diff_arr = np.dstack((train_input_diff_arr, train_target_diff))
train_input_diff_arr = pd.DataFrame(train_input_diff_arr[0], columns = columns_name)
model = pf.ARIMAX(data=train_input_diff_arr,formula="current_price~totaltx_5",ar=2,ma=2,integ=0)
model_1 = model.fit("MLE")
model_1.summary()
test_input_pd = pd.DataFrame(test_input, columns = train_columns_name)
test_target_pd = pd.DataFrame(test_target, columns = ['current_price'])
test_input_target = pd.concat([test_input_pd, test_target_pd], axis=1)
pred = model.predict(h=test_input_target.shape[0],
oos_data=test_input_target,
intervals=True, )
    arimax_base_rmse = np.sqrt(mean_squared_error([test_input_target.iloc[0, 6]], [(train_target[99]) + pred.current_price[99]]))
print("arimax_base_rmse:",arimax_base_rmse)
return arimax_base_rmse
def run_print_model(train_input, train_target, test_input, test_target, train_days, test_days):
rf_base_rmse = rf_base_rmse_mode(train_input, train_target, test_input, test_target)
xgbt_base_rmse = xgbt_base_rmse_mode(train_input, train_target, test_input, test_target)
gp_base_rmse = gp_base_rmse_mode(train_input, train_target, test_input, test_target)
enet_base_rmse = enet_base_rmse_mode(train_input, train_target, test_input, test_target)
return rf_base_rmse, xgbt_base_rmse, gp_base_rmse, enet_base_rmse
#print_results(predicted, test_target, original_log_return, predicted_log_return, cost, test_days, rmse)
#return rf_base_rmse
def preprocess_data(window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
priced_bitcoin = pd.read_csv(PRICED_BITCOIN_FILE_PATH, sep=",")
if(ALL_YEAR_INPUT_ALLOWED):
pass
else:
priced_bitcoin = priced_bitcoin[priced_bitcoin['year']==YEAR].reset_index(drop=True)
# get normalized occurence matrix in a flat format and merge with totaltx
daily_occurrence_input = np.array([],dtype=np.float32)
temp = np.array([], dtype=np.float32)
for current_index, current_row in priced_bitcoin.iterrows():
if(current_index<(window_size+prediction_horizon-1)):
pass
else:
start_index = current_index - (window_size + prediction_horizon) + 1
end_index = current_index - prediction_horizon
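            # Sliding window: features come from the window_size days that end
            # prediction_horizon days before the current day, whose price is the target.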
temp = get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
#print("1st temp: ", temp, temp.shape)
if(daily_occurrence_input.size == 0):
daily_occurrence_input = temp
else:
#print("daily_occurrence_input: ", daily_occurrence_input, daily_occurrence_input.shape)
#print("temp: ", temp, temp.shape)
daily_occurrence_input = np.concatenate((daily_occurrence_input, temp), axis=0)
#print("return daily_occurrence_input:", daily_occurrence_input, daily_occurrence_input.shape)
#if current_index == 108:
#print("daily_occurrence_input: ", daily_occurrence_input, daily_occurrence_input.shape)
return daily_occurrence_input
def initialize_setting(window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
data = preprocess_data(window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
#train, test = train_test_split(data, test_size=TEST_SPLIT)
#data = pd.DataFrame(data)
train = data[0:100, :]
test = data[100, :].reshape(1, -1)
#print(" train, test shape",train.shape, test.shape)
#print(" train, test",train, test)
x_train, x_test, train_days, test_days = exclude_days(train, test)
#print("x_train:", x_train)
row, column = x_train.shape
train_target = np.asarray(x_train[:, -1]).reshape(-1)
train_input = x_train[:, 0:column - 1]
#x_test = x_test.reshape(-1,1)
test_target = x_test[: , -1]
test_input = x_test[ : , 0:column - 1]
return train_input, train_target, test_input, test_target, train_days, test_days
parameter_dict = {#0: dict({'is_price_of_previous_days_allowed':True, 'aggregation_of_previous_days_allowed':True})}
1: dict({'is_price_of_previous_days_allowed':True, 'aggregation_of_previous_days_allowed':False})}
for step in parameter_dict:
gc.collect()
evalParameter = parameter_dict.get(step)
is_price_of_previous_days_allowed = evalParameter.get('is_price_of_previous_days_allowed')
aggregation_of_previous_days_allowed = evalParameter.get('aggregation_of_previous_days_allowed')
print("IS_PRICE_OF_PREVIOUS_DAYS_ALLOWED: ", is_price_of_previous_days_allowed)
print("AGGREGATION_OF_PREVIOUS_DAYS_ALLOWED: ", aggregation_of_previous_days_allowed)
window_size_array = [3, 5, 7]
horizon_size_array = [1, 2, 5, 7, 10, 15, 20, 25, 30]
for window_size in window_size_array:
print('WINDOW_SIZE: ', window_size)
rmse_array = []
for prediction_horizon in horizon_size_array:
print("PREDICTION_HORIZON: ", prediction_horizon)
train_input, train_target, test_input, test_target, train_days, test_days = initialize_setting(window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
#print("train_input, train_target: ",train_input, train_target, train_input.shape, train_target.shape)
#print("test_input, test_target",test_input, test_target, test_input.shape, test_target.shape)
#print("train_days, test_days: ",train_days, test_days)
rf_base_rmse, xgbt_base_rmse, gp_base_rmse, enet_base_rmse = run_print_model(train_input, train_target, test_input, test_target, train_days, test_days)
train_input, train_target, test_input, test_target, train_days, test_days = arimax_initialize_setting(window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
arimax_base_rmse = arimax_base_rmse_mode(train_input, train_target, test_input, test_target)
rmse = | pd.DataFrame({'rf_base_rmse': [rf_base_rmse], 'xgbt_base_rmse': [xgbt_base_rmse], 'gp_base_rmse': [gp_base_rmse], 'enet_base_rmse': [enet_base_rmse], 'arimax_base_rmse': [arimax_base_rmse]}) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
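        # Three equivalent ways to read just the header row: pandas column index (method 1),
        # csv.DictReader fieldnames (method 2), or a single-row pandas read (method 3).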
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
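        # Serialize each dataframe column into the INI section as a quoted, comma-separated
        # list keyed by the column name; the file is written only if it is missing or empty.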
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            # Drop rows beyond mean + multiplierSigma * sd (treated as timed out)
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": pandas.StringDtype(),
"blankCount": | pandas.StringDtype() | pandas.StringDtype |
#!/usr/bin/env python
# Script to read the result of the benchmark program and plot the results.
# Options:
# `-i arg` : input file (benchmark result)
# `-o arg` : html output for the plot
# Notes: After the script runs the plot will automatically be shown in a browser.
# Tested with python 3 only.
import argparse
import itertools
import collections
import pandas as pd
import matplotlib.pyplot as plt
import re
from bokeh.layouts import gridplot
from bokeh.palettes import Spectral11
from bokeh.plotting import figure, show, output_file
# Given a file at the start of a test result (on the header line)
# Return a data frame for the test result and leave the file one past
# the blank line at the end of the result
def to_data_frame(header, it):
column_labels = re.split(' +', header.strip())
columns = [[] for i in range(len(column_labels))]
for l in it:
l = l.strip()
if not l:
break
fields = l.split()
if len(fields) != len(columns):
raise Exception('Bad file format, line: {}'.format(l))
for c, f in zip(columns, fields):
c.append(float(f))
d = {k: v for k, v in zip(column_labels, columns)}
return | pd.DataFrame(d, columns=column_labels[1:], index=columns[0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
These test the private routines in types/cast.py
"""
import pytest
from datetime import datetime, timedelta, date
import numpy as np
import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
maybe_downcast_to_dtype,
maybe_convert_objects,
cast_scalar_to_array,
infer_dtype_from_scalar,
infer_dtype_from_array,
maybe_convert_string_to_object,
maybe_convert_scalar,
find_common_type)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
PeriodDtype)
from pandas.core.dtypes.common import (
is_dtype_equal)
from pandas.util import testing as tm
class TestMaybeDowncast(object):
def test_downcast_conv(self):
# test downcasting
arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
assert (np.array_equal(result, arr))
arr = np.array([8., 8., 8., 8., 8.9999999999995])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
arr = np.array([8., 8., 8., 8., 9.0000000000005])
result = maybe_downcast_to_dtype(arr, 'infer')
expected = np.array([8, 8, 8, 8, 9])
assert (np.array_equal(result, expected))
# GH16875 coercing of bools
ser = Series([True, True, False])
result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
expected = ser
tm.assert_series_equal(result, expected)
# conversions
expected = np.array([1, 2])
for dtype in [np.float64, object, np.int64]:
arr = np.array([1.0, 2.0], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected, check_dtype=False)
for dtype in [np.float64, object]:
expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'infer')
tm.assert_almost_equal(result, expected)
# empties
for dtype in [np.int32, np.float64, np.float32, np.bool_,
np.int64, object]:
arr = np.array([], dtype=dtype)
result = maybe_downcast_to_dtype(arr, 'int64')
tm.assert_almost_equal(result, np.array([], dtype=np.int64))
assert result.dtype == np.int64
def test_datetimelikes_nan(self):
arr = np.array([1, 2, np.nan])
exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
tm.assert_numpy_array_equal(res, exp)
exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
tm.assert_numpy_array_equal(res, exp)
def test_datetime_with_timezone(self):
# GH 15426
ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
exp = DatetimeIndex([ts, ts])
res = maybe_downcast_to_dtype(exp, exp.dtype)
tm.assert_index_equal(res, exp)
res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
tm.assert_index_equal(res, exp)
class TestInferDtype(object):
def testinfer_dtype_from_scalar(self):
# Test that infer_dtype_from_scalar is returning correct dtype for int
# and float.
for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
np.int32, np.uint64, np.int64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == type(data)
data = 12
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.int64
for dtypec in [np.float16, np.float32, np.float64]:
data = dtypec(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == dtypec
data = np.float(12)
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.float64
for data in [True, False]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.bool_
for data in [np.complex64(1), np.complex128(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.complex_
for data in [np.datetime64(1, 'ns'), Timestamp(1),
datetime(2000, 1, 1, 0, 0)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'M8[ns]'
for data in [np.timedelta64(1, 'ns'), Timedelta(1),
timedelta(1)]:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == 'm8[ns]'
for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
dt = Timestamp(1, tz=tz)
dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
assert dtype == 'datetime64[ns, {0}]'.format(tz)
assert val == dt.value
dtype, val = infer_dtype_from_scalar(dt)
assert dtype == np.object_
assert val == dt
for freq in ['M', 'D']:
p = Period('2011-01-01', freq=freq)
dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True)
assert dtype == 'period[{0}]'.format(freq)
assert val == p.ordinal
dtype, val = infer_dtype_from_scalar(p)
            assert dtype == np.object_
assert val == p
# misc
for data in [date(2000, 1, 1),
Timestamp(1, tz='US/Eastern'), 'foo']:
dtype, val = infer_dtype_from_scalar(data)
assert dtype == np.object_
def testinfer_dtype_from_scalar_errors(self):
with pytest.raises(ValueError):
infer_dtype_from_scalar(np.array([1]))
@pytest.mark.parametrize(
"arr, expected, pandas_dtype",
[('foo', np.object_, False),
(b'foo', np.object_, False),
(1, np.int_, False),
(1.5, np.float_, False),
([1], np.int_, False),
(np.array([1], dtype=np.int64), np.int64, False),
([np.nan, 1, ''], np.object_, False),
(np.array([[1.0, 2.0]]), np.float_, False),
(pd.Categorical(list('aabc')), np.object_, False),
(pd.Categorical([1, 2, 3]), np.int64, False),
(pd.Categorical(list('aabc')), 'category', True),
(pd.Categorical([1, 2, 3]), 'category', True),
(Timestamp('20160101'), np.object_, False),
(np.datetime64('2016-01-01'), np.dtype('<M8[D]'), False),
(pd.date_range('20160101', periods=3),
np.dtype('<M8[ns]'), False),
(pd.date_range('20160101', periods=3, tz='US/Eastern'),
'datetime64[ns, US/Eastern]', True),
(pd.Series([1., 2, 3]), np.float64, False),
(pd.Series(list('abc')), np.object_, False),
(pd.Series(pd.date_range('20160101', periods=3, tz='US/Eastern')),
'datetime64[ns, US/Eastern]', True)])
def test_infer_dtype_from_array(self, arr, expected, pandas_dtype):
dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)
assert is_dtype_equal(dtype, expected)
def test_cast_scalar_to_array(self):
arr = cast_scalar_to_array((3, 2), 1, dtype=np.int64)
exp = np.ones((3, 2), dtype=np.int64)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((3, 2), 1.1)
exp = np.empty((3, 2), dtype=np.float64)
exp.fill(1.1)
tm.assert_numpy_array_equal(arr, exp)
arr = cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
exp = np.empty((2, 3), dtype='datetime64[ns]')
exp.fill(np.datetime64('2011-01-01'))
tm.assert_numpy_array_equal(arr, exp)
# pandas dtype is stored as object dtype
obj = Timestamp('2011-01-01', tz='US/Eastern')
arr = cast_scalar_to_array((2, 3), obj)
exp = np.empty((2, 3), dtype=np.object)
exp.fill(obj)
| tm.assert_numpy_array_equal(arr, exp) | pandas.util.testing.assert_numpy_array_equal |
#izvor: https://github.com/asetkn/Tutorial-Image-and-Multiple-Bounding-Boxes-Augmentation-for-Deep-Learning-in-4-Steps/blob/master/Tutorial-Image-and-Multiple-Bounding-Boxes-Augmentation-for-Deep-Learning-in-4-Steps.ipynb
import imgaug as ia
ia.seed(1)
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from imgaug import augmenters as iaa
import imageio
import pandas as pd
import numpy as np
import re
import os
import glob
import shutil
from csv import reader, writer
folders = glob.glob('D:\\tsr\\train\\*')
images = []
def searchImagesFolder(folder_list, pattern):
    # NOTE: as in the original, this walks the module-level 'folders' list (the
    # 'folder_list' argument is unused) and appends every match to 'images'.
    for folder in folders:
        for index, file in enumerate(glob.glob(folder + pattern)):
            images.append(imageio.imread(file))
    return images
def bbs_obj_to_df(bbs_object):
bbs_array = bbs_object.to_xyxy_array()
df_bbs = pd.DataFrame(bbs_array, columns=['xmin', 'ymin', 'xmax', 'ymax'])
    df_bbs = df_bbs.round(0).astype(int)
return df_bbs
def resize_imgaug(df, images_path, aug_images_path, image_prefix):
aug_bbs_xy = pd.DataFrame(columns=
['filename', 'xmin', 'ymin', 'xmax', 'ymax', 'label']
)
grouped = df.groupby('filename')
for filename in df['filename'].unique():
group_df = grouped.get_group(filename)
group_df = group_df.reset_index()
group_df = group_df.drop(['index'], axis=1)
image = imageio.imread(filename)
height=image.shape[0]
width=image.shape[1]
fil=filename.split('/')
fil1=fil[0]
fil2=fil[1]
if(group_df['label'].isnull().values.any()):
image = imageio.imread(images_path+filename)
image_aug = height_resize(image=image)
imageio.imwrite(aug_images_path+'/'+fil1+'/'+image_prefix+fil2, image_aug)
group_df['xmin'] =''
group_df['xmax'] =''
group_df['ymin'] =''
group_df['ymax'] =''
info_df = group_df
info_df['filename'] = info_df['filename'].apply(lambda x: aug_images_path+fil1+'/'+image_prefix+fil2)
aug_bbs_xy = pd.concat([aug_bbs_xy, info_df])
else:
image = imageio.imread(images_path+filename)
bb_array = group_df.drop(['filename', 'label'], axis=1).values
bbs = BoundingBoxesOnImage.from_xyxy_array(bb_array, shape=image.shape)
image_aug, bbs_aug = height_resize(image=image, bounding_boxes=bbs)
imageio.imwrite(aug_images_path+'/'+fil1+'/'+image_prefix+fil2, image_aug)
info_df = group_df.drop(['xmin', 'ymin', 'xmax', 'ymax'], axis=1)
info_df['filename'] = info_df['filename'].apply(lambda x: aug_images_path+fil1+'/'+image_prefix+fil2)
bbs_df = bbs_obj_to_df(bbs_aug)
aug_df = pd.concat([info_df, bbs_df], axis=1)
aug_bbs_xy = | pd.concat([aug_bbs_xy, aug_df]) | pandas.concat |
import numpy as np
from unet import utils
from unet.sim_measures import jaccard_pixelwise, jaccard_roiwise
from helper_fxns import ijroi
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from shapely.geometry import Polygon, MultiPolygon
from shapely.ops import cascaded_union
from keras.preprocessing.image import img_to_array, load_img
from scipy.misc import imsave
import pandas as pd
import itertools
import json
import sys
import os
from bokeh.plotting import figure, show, output_file, ColumnDataSource
from bokeh.palettes import Category20_20 as palette
from bokeh.palettes import brewer
def read_json(data_file):
#INPUT:
# data_file: path to json file
#OUTPUT:
# return: data from json file
with open(data_file, "r") as read_file:
data = json.load(read_file)
return(data)
def convert_to_single_polygon(multi_polygon_object, gene_name):
# INPUT:
# multi_polygon_object: list of polygon objects
# gene_name: names of the genes being smushed together
# OUTPUT:
# return: single polygon object
single_poly = MultiPolygon(Polygon(p.exterior) for p in multi_polygon_object)
single_poly = cascaded_union([Polygon(component.exterior).buffer(4).buffer(-3) for component in single_poly])
if single_poly.type == "Polygon":
return(single_poly)
else:
print("!!!!!!YOU NEED TO FIX THE IMAGEJ ROI FOR: ", gene_name, " AT: ", single_poly.bounds)
return(None)
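# --- Hedged toy example (not part of the pipeline) -------------------------------
# The buffer-out/buffer-in trick used above: two nearby squares are bridged by
# buffer(+4) and shrunk back by buffer(-3), so the union becomes a single polygon.
# Assumes the same shapely (<2) iteration/cascaded_union API as the code above.
def _merge_demo():
    parts = MultiPolygon([
        Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
        Polygon([(1.1, 1.1), (2, 1.1), (2, 2), (1.1, 2)]),
    ])
    merged = cascaded_union([Polygon(p.exterior).buffer(4).buffer(-3) for p in parts])
    return merged.geom_type  # 'Polygon' once the pieces are bridged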
def shape_objs_from_roi_file(roi_file):
roi = ijroi.read_roi_zip(roi_file)
ply_list = []
for r in roi:
coords = r[1]
coord_list = []
for p in coords:
pt = (p[0], p[1])
coord_list.append(pt)
ply = Polygon(coord_list)
ply = ply.buffer(0)
if ply.type == "MultiPolygon":
ply = convert_to_single_polygon(ply, roi_file)
if ply.area > 0:
ply_list.append(ply)
return(ply_list)
def shape_obj_from_df_coords(coords):
# INPUT: coordinate list for a polygon
# OUTPU: a shapely.Polygon object
ys = coords[0]
xs = coords[1]
coord_list = []
for y, x in zip(ys, xs):
coord_list.append((y,x))
ply = Polygon(coord_list)
ply.buffer(0)
return(ply)
def get_mean_pix_in_bb(img, bb_bounds):
# print(bb_bounds)
y1 = int(bb_bounds[0])
y2 = int(bb_bounds[2])
x1 = int(bb_bounds[1])
x2 = int(bb_bounds[3])
bb_img = img[y1:y2,x1:x2,:]
bb_mean = np.average(bb_img)
# print(bb_mean)
# print(bb_img.shape)
return(bb_mean)
def redraw_masks(roi_df, ch_dict, img_shape, mask_save_dir):
from PIL import Image, ImageDraw
# arr = np.zeros(img_shape)
img_shape = (img_shape[1], img_shape[0])
for ch_num, ch_name in ch_dict.items():
img = Image.new('L', img_shape, 0)
ch_df = roi_df.loc[roi_df['gene'] == ch_name]
img = Image.new('L', img_shape, 0)
for index, row in ch_df.iterrows():
ply = shape_obj_from_df_coords(row['polygon_coords'])
ys = np.array(ply.exterior.xy)[0].tolist()
xs = np.array(ply.exterior.xy)[1].tolist()
ply_coords = list(zip(xs, ys))
# print(ply_coords)
ImageDraw.Draw(img).polygon(ply_coords, outline=255, fill=255)
mask = np.array(img)
outfile = os.path.join(mask_save_dir, ch_name + '_test_mask.tif')
imsave(outfile, mask)
# print(mask.shape)
# print(np.max(mask))
def df_from_roi_file(roi_zip_files, root_file_path, ch_dict, area_threshold):
#INPUT:
# roi_zip_files: names of the roi files in the root folder
# root_file_path: path to the enclosing folder which contains roi/img/csv files
# ch_dict: key=index of channel in multichannel tiff img. value=channel name. I omitted dapi.
# area_threshold: minimum roi size to pass
#OUTPUT:
# return: pd.dataframe of all rois of selected channels that meet minimum size requirement
# roi_zip_files.sort()
# gene_names = [s.split('_')[0] for s in roi_zip_files]
# gene_names.sort()
column_names = ['gene', 'channel_number', 'bb_coords', 'polygon_coords', 'polygon_area', 'mean_intensity_in_bb', 'centroid', 'index_num_coexpressed', 'channel_nums_coexpressed', 'channel_names_coexpressed']
img_df = pd.DataFrame(columns=column_names)
# for name, roi_file in zip(gene_names, roi_zip_files):
# full_roi_file_path = os.path.join(root_file_path, roi_file)
# ply_list = shape_objs_from_roi_file(full_roi_file_path)
for ch, name in ch_dict.items():
file_name = name + '_RoiSet.zip'
full_roi_file_path = os.path.join(root_file_path, file_name)
ply_list = shape_objs_from_roi_file(full_roi_file_path)
for p in ply_list:
if p.area > area_threshold:
load_dict = {}
load_dict['gene'] = name
load_dict['bb_coords'] = list(p.bounds)
load_dict['channel_number'] = ch
load_dict['polygon_coords'] = list(p.exterior.xy)
load_dict['polygon_area'] = p.area
load_dict['mean_intensity_in_bb'] = get_mean_pix_in_bb(dummy_img, list(p.bounds))
load_dict['centroid'] = list(p.centroid.coords)
img_df = img_df.append(load_dict, ignore_index=True)
# print(np.array(p.exterior.xy))
# print(img_df)
return(img_df)
def find_coexpression_indexs(roi_df, channel_dict, iou_threshold):
#INPUT:
# roi_df: pd.DataFrame of all rois
# channel_dict: key=index of channel in multichannel tiff img. value=channel name. I omitted dapi.
# iou_threshold: minimum IntersectionOverUnion value to count as coexpression
#OUTPUT:
# return: pd.DataFrame same as 'roi_df' with extra columns for 'index_num_coexpressed' 'channel_nums_coexpressed' 'channel_names_coexpressed'
# add new columns to df
roi_df = roi_df
roi_df['index_num_coexpressed'] = roi_df['index_num_coexpressed'].astype(object)
roi_df['channel_nums_coexpressed'] = roi_df['channel_nums_coexpressed'].astype(object)
roi_df['channel_names_coexpressed'] = roi_df['channel_names_coexpressed'].astype(object)
# generate list of all possible combinations of coexpression n=2
gene_list = list(channel_dict.values())
comb_gene_list = list(itertools.combinations(gene_list, 2))
for comb in comb_gene_list:
g1_df = roi_df.loc[roi_df['gene'] == comb[0]]
g2_df = roi_df.loc[roi_df['gene'] == comb[1]]
for index1, row1 in g1_df.iterrows():
poly1 = shape_obj_from_df_coords(row1['polygon_coords'])
for index2, row2 in g2_df.iterrows():
poly2 = shape_obj_from_df_coords(row2['polygon_coords'])
                # compute the IoU only if the polygons intersect; this saves a lot of time
if poly1.intersects(poly2):
intersection = poly1.intersection(poly2).area
union = (poly1.area+poly2.area) - intersection
iou = intersection/union
# if IoU > iou_threshold update index_num_coexpressed list in df
if iou > iou_threshold:
coex_prev_index_1 = roi_df.at[index1, 'index_num_coexpressed']
if np.isnan(coex_prev_index_1).all():
index_list_1 = [index2]
roi_df.at[index1, 'index_num_coexpressed'] = index_list_1
else:
coex_prev_index_1.append(index2)
roi_df.at[index1, 'index_num_coexpressed'] = coex_prev_index_1
coex_prev_index_2 = roi_df.at[index2, 'index_num_coexpressed']
if np.isnan(coex_prev_index_2).all():
index_list_2 = [index1]
roi_df.at[index2, 'index_num_coexpressed'] = index_list_2
else:
coex_prev_index_2.append(index1)
roi_df.at[index2, 'index_num_coexpressed'] = coex_prev_index_2
return(roi_df)
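# --- Hedged illustration (toy data, not part of the analysis) --------------------
# The IoU check used in find_coexpression_indexs, on two overlapping unit squares
# offset by 0.5: intersection = 0.25, union = 1.75, so IoU ~= 0.143.
def _iou_demo():
    p1 = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    p2 = Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)])
    if not p1.intersects(p2):
        return 0.0
    intersection = p1.intersection(p2).area
    union = (p1.area + p2.area) - intersection
    return intersection / union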
def fill_in_df_holes(roi_df, ch_dict):
#INPUT:
# roi_df: coexpression roi pd.DataFrame
# ch_dict: key=index of channel in multichannel tiff img. value=channel name. I omitted dapi.
#OUTPUT:
# roi_df: df same as roi_df, with 'channel_number' and 'gene' columns filled in.
# count_df: df of counts of coexpression
roi_df = roi_df
# create an empty array to count coexpression into
n_ch = int(max(ch_dict.keys()))+1
count_arr = np.zeros((n_ch, n_ch))
ch_keys = list(ch_dict.keys())
for index, row in roi_df.iterrows():
coex_indxs = row['index_num_coexpressed']
# update count_arr
count_coord_primary = int(row['channel_number'])
count_arr[(count_coord_primary, count_coord_primary)] += 1
if isinstance(coex_indxs, list):
ch_num_list = []
ch_name_list = []
for inx in coex_indxs:
ch_num = roi_df.at[inx, 'channel_number']
ch_name = roi_df.at[inx, 'gene']
ch_num_list.append(int(ch_num))
ch_name_list.append(ch_name)
# update count_arr
count_arr[(count_coord_primary, int(ch_num))] += 1
roi_df.at[index, 'channel_nums_coexpressed'] = ch_num_list
roi_df.at[index, 'channel_names_coexpressed'] = ch_name_list
# convert count_arr to a named dataframe of columns and indexes of ch_name
count_df = | pd.DataFrame(columns=ch_keys, index=ch_keys) | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
import warnings
from pandas.testing import assert_frame_equal
from typing import Dict
from io import BytesIO, StringIO
from zipfile import ZipFile, ZIP_DEFLATED
import sys
import os
import boto3
# Define type aliases
DF = pd.DataFrame
# Add project root directory to Python path.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from fpldata.s3store import S3Store
class TestS3Store(unittest.TestCase):
"""
Tests the class that persists data frames to S3. To be able to execute these integration tests,
you need:
a) an AWS account
b) test BUCKET (Note you will need to update the test_s3_bucket variable accordingly)
c) your AWS keys locally (see https://docs.aws.amazon.com/AmazonS3/latest/dev/setup-aws-cli.html)
"""
test_s3_bucket = 'fpl-test.177arc.net'
test_obj_name = 'test.csv'
s3store: S3Store
s3: boto3.resource
def __read_df(self, obj_name: str) -> DF:
obj = self.s3.get_object(Bucket=self.test_s3_bucket, Key=obj_name)
content = BytesIO(obj['Body'].read())
compression = None
if obj['ContentEncoding'] == 'gzip':
compression = 'gzip'
return pd.read_csv(content, compression=compression)
def __read_df_zip(self, obj_name: str) -> DF:
return list(self.__read_dfs_zip(obj_name).values())[0]
def __read_dfs_zip(self, obj_name: str) -> Dict[str, DF]:
zf = self.__read_zip(obj_name)
dfs = {}
with zf:
for file_name in zf.namelist():
dfs[file_name.replace('.csv', '')] = pd.read_csv(BytesIO(zf.read(file_name)))
return dfs
def __read_zip(self, obj_name: str) -> ZipFile:
obj = self.s3.get_object(Bucket=self.test_s3_bucket, Key=obj_name)
buffer = BytesIO(obj["Body"].read())
return ZipFile(buffer)
def __write_df(self, df: DF, obj_name: str) -> None:
csv_buffer = StringIO()
df.to_csv(csv_buffer)
self.s3.put_object(Bucket=self.test_s3_bucket, Key=obj_name, Body=csv_buffer.getvalue())
def __write_df_zip(self, df: DF, obj_name: str) -> None:
self.__write_dfs_zip({obj_name.replace('.zip', ''): df}, obj_name)
def __write_dfs_zip(self, dfs: Dict[str,DF], obj_name: str) -> None:
zip_buffer = BytesIO()
with ZipFile(zip_buffer, mode='w', compression=ZIP_DEFLATED) as zf:
for df_name, df in dfs.items():
csv_buffer = StringIO()
df.to_csv(csv_buffer)
zf.writestr(df_name+'.csv', csv_buffer.getvalue())
self.s3.put_object(Bucket=self.test_s3_bucket, Key=obj_name, Body=zip_buffer.getvalue())
def __del(self, obj_name: str) -> None:
self.s3.delete_object(Bucket=self.test_s3_bucket, Key=obj_name)
def setUp(self) -> None:
warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*<ssl.SSLSocket.*>")
self.s3store = S3Store(s3_bucket=self.test_s3_bucket)
self.s3 = boto3.client('s3')
self.dfs = {'df1': pd.DataFrame.from_dict(
{0: ['test 1', 1, True, 1.1, 'bayern'],
1: ['test 2', 2, False, 1.2, 'bayern'],
2: [np.nan, 3, np.nan, np.nan, 'bayern']},
orient='index',
columns=['Name 1', 'Name 2', 'Name 3', 'Name 4', 'Name 5']).set_index('Name 2'),
'df2': pd.DataFrame.from_dict(
{0: ['test 1', 1, True, 1.1],
1: ['test 2', 2, False, 1.2]},
orient='index',
columns=['Col 1', 'Col 2', 'Col 3', 'Col 4']).set_index('Col 1')}
self.df = list(self.dfs.values())[0]
def test_save_df(self) -> None:
# Set up
self.__del(self.test_obj_name)
# Execute test
self.s3store.save_df(self.df, self.test_obj_name)
# Assert
actual_df = self.__read_df(self.test_obj_name)
assert_frame_equal(self.df.reset_index(), actual_df, check_dtype=False, check_column_type=False)
def test_save_df_zip(self) -> None:
# Set up
self.__del(self.test_obj_name + '.zip')
# Execute test
self.s3store.save_df(self.df, self.test_obj_name + '.zip')
# Assert
actual_df = self.__read_df_zip(self.test_obj_name + '.zip')
assert_frame_equal(self.df.reset_index(), actual_df, check_dtype=False, check_column_type=False)
def test_save_dfs(self) -> None:
# Set up
self.__del(self.test_obj_name)
# Execute test
self.s3store.save_dfs(self.dfs, self.test_obj_name + '.zip')
# Assert
actual_dfs = self.__read_dfs_zip(self.test_obj_name + '.zip')
for df_name, df in self.dfs.items():
assert_frame_equal(df.reset_index(), actual_dfs[df_name], check_dtype=False, check_column_type=False)
def test_save_dir_zip(self):
# Set up
dfs = {}
dfs['df1'] = pd.read_csv('dfs/df1.csv')
dfs['df2'] = pd.read_csv('dfs/df2.csv')
# Execute test
self.s3store.save_dir('dfs', self.test_obj_name + '.zip')
# Assert
actual_dfs = self.__read_dfs_zip(self.test_obj_name + '.zip')
for df_name, df in dfs.items():
| assert_frame_equal(df, actual_dfs[df_name], check_dtype=False, check_column_type=False) | pandas.testing.assert_frame_equal |
import sqlite3
import pandas as pd
conn = sqlite3.connect('rpg_db.sqlite3')
cur = conn.cursor()
# How many total Characters are there?
query1 = """
SELECT COUNT(character_id)
FROM charactercreator_character cc
"""
cur.execute(query1)
result_list = cur.fetchall()
cols = [ii[0] for ii in cur.description]
df1 = pd.DataFrame(result_list, columns=cols)
my_conn1 = sqlite3.connect("my_db1.sqlite")
df1.to_sql('my_table1', my_conn1, index=False, if_exists='replace')
# How many of each specific subclass?
query2 = """
SELECT COUNT(DISTINCT cf.character_ptr_id) AS TotalFighter,
COUNT(DISTINCT cc.character_ptr_id) AS TotalCleric,
COUNT(DISTINCT cm.character_ptr_id) AS TotalMage,
COUNT(DISTINCT cn.mage_ptr_id) AS TotalNecromancer,
COUNT(DISTINCT ct.character_ptr_id) AS TotalThief
FROM charactercreator_fighter cf, charactercreator_cleric cc,
charactercreator_mage cm, charactercreator_necromancer cn,
charactercreator_thief ct
"""
df2 = pd.read_sql(query2, conn)
my_conn2 = sqlite3.connect("my_db2.sqlite")
df2.to_sql('my_table2', my_conn2, index=False, if_exists='replace')
# How many total Items?
query3 = """
SELECT COUNT(item_id) AS ItemTotal
FROM armory_item ai
"""
df3 = pd.read_sql(query3, conn)
my_conn3 = sqlite3.connect("my_db3.sqlite")
df3.to_sql('my_table3', my_conn3, index=False, if_exists='replace')
# How many of the Items are weapons? How many are not?
query4 = """
SELECT COUNT(DISTINCT aw.item_ptr_id) AS TotalWeapons,
COUNT(DISTINCT ai.item_id) - COUNT(DISTINCT aw.item_ptr_id) AS TotalNotWeapons
FROM armory_item ai, armory_weapon aw
"""
df4 = pd.read_sql(query4, conn)
my_conn4 = sqlite3.connect("my_db4.sqlite")
df4.to_sql('my_table4', my_conn4, index=False, if_exists='replace')
# How many Items does each character have? (Return first 20 rows)
query5 = """
SELECT character_id, COUNT(item_id) AS ItemsTotal
FROM charactercreator_character_inventory cci
GROUP BY character_id
LIMIT 20
"""
df5 = | pd.read_sql(quer5, conn) | pandas.read_sql |
#!/usr/bin/python3
from sys import argv
import sys
#from PyQt5 import QtCore, QtGui, uic, QtWidgets
#from PyQt5.QtWebEngineWidgets import *
#from PyQt5.QtCore import QUrl
import numpy as np
from jupyter_dash import JupyterDash
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
from sklearn import linear_model
plt.rcParams["figure.figsize"] = (15,15)
import math
import seaborn as sns
import shap
#from datetime import datetime
import time
from ipywidgets.embed import embed_minimal_html
from umap import UMAP
from pandas_profiling import ProfileReport
from sklearn.neighbors import kneighbors_graph
from prophet import Prophet
import umap
from lightgbm import LGBMRegressor,LGBMClassifier
from sklearn.preprocessing import *
from sklearn.decomposition import *
from sklearn.manifold import *
from sklearn.pipeline import make_pipeline
from sklearn.utils import estimator_html_repr
import sklearn
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import *
from sklearn.linear_model import *
import networkx as nx
from prophet.plot import plot_plotly, plot_components_plotly
import calendar
from prophet.utilities import regressor_coefficients
import plotly.express as px
#from jupyter_dash import JupyterDash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
import base64
import numpy as np
import pandas as pd
from io import StringIO
import io
import dash
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_core_components as dcc
import dash_table
import dash_cytoscape as cyto
from dash.exceptions import PreventUpdate
from keplergl import KeplerGl
import hdbscan
import datetime
from scipy.spatial import distance_matrix
from sklearn.metrics.pairwise import euclidean_distances
from scipy.stats import ttest_ind, ttest_1samp
from dash_table.Format import Format, Scheme, Trim
from sklearn.compose import make_column_transformer
from ipywidgets import AppLayout, Button, Layout, Accordion
from ipywidgets import Button, Layout, jslink, IntText, IntSlider, HBox, VBox
from ipywidgets import GridspecLayout
from sklearn.preprocessing import *
from sklearn.decomposition import *
from sklearn.manifold import *
from sklearn.pipeline import make_pipeline
from umap import UMAP
from sklearn.ensemble import *
from sklearn.linear_model import *
from joblib import Memory
from shutil import rmtree
import sklearn
from sklearn import svm, datasets
from sklearn.metrics import auc,confusion_matrix,plot_confusion_matrix,classification_report
from sklearn.metrics import plot_roc_curve
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.compose import ColumnTransformer
from lightgbm import LGBMClassifier, LGBMRegressor
from skopt import BayesSearchCV, gp_minimize, forest_minimize, gbrt_minimize
from skopt.searchcv import BayesSearchCV as BSCV
from skopt.space import Real, Categorical, Integer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn import set_config
from sklearn.ensemble import RandomForestClassifier as rf
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_score, cross_validate, StratifiedKFold, KFold
from skopt.plots import plot_objective
from skopt.utils import use_named_args
from skopt.plots import plot_convergence
from sklearn.feature_selection import RFECV
from lightgbm import LGBMRegressor
from lightgbm import LGBMClassifier
from sklearn.preprocessing import StandardScaler
set_config(display='diagram')
import numpy as np
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import matplotlib
#matplotlib.style.use('seaborn')
from sklearn import linear_model
import math
import seaborn as sns
import shap
#from datetime import datetime
import time
import ipywidgets as widget
import dash_html_components as html
from sklearn.base import clone
#JupyterDash.infer_jupyter_proxy_config()  # uncomment for binder use
def _force_plot_htmlsm(*args):
force_plot = shap.force_plot(*args, matplotlib=False)
shap_html = f"<head>{shap.getjs()}</head><body>{force_plot.html()}</body>"
return html.Iframe(srcDoc=shap_html,style={"width": "100%", "height": "400px", "border": 0})
#### things to add : memory management
#### add n_jobs = -1 for everything
#### add featureUnion
class pipemaker2:
def __init__(self, df,ipt_pipe, target ,*, height = 'auto', width = 'auto'):
self.pipe_list = []
###### Dataframe!
self.df = df
###### App
self.target = widget.Select(options = list(self.df.columns), description = 'Target',rows=1 ,layout=Layout(height='auto', width='33%'))
self.target.value = target
self.TG = target
self.classifier = widget.Select(options = ['LGBMClassifier', 'LGBMRegressor'] + sklearn.ensemble.__all__ + sklearn.linear_model.__all__, description = 'Classifier',rows=1, layout=Layout(height='auto', width='33%'))
#### add column buttons
self.nColumns = widget.BoundedIntText( value=1,min=1,step=1,description='Number of column transformers:' ,layout=Layout(height='auto', width='33%'))
self.nColumns.observe(self.maketab, "value")
self.top_box = HBox([self.nColumns, self.target, self.classifier],layout=Layout(height='auto', width='100%'))
self.acc_list = [self.makeacc()]
self.check = 0
self.tab = widget.Tab()
self.tab.set_title(0, '0')
self.tab.children = self.acc_list
self.widget = VBox([self.top_box, self.tab])
self.cached_pipe = 0
self.location = 0
self.memory = 0
self.optimized_pipe = (0, 0)
self.input_pipe = ipt_pipe
def makeacc(self):
accordion = widget.Accordion(children=[
widget.Text(str(self.nColumns.value)),
widget.SelectMultiple(options=self.df.columns.values, description='columns',rows=len(self.df.columns)),
widget.Text(''),
widget.ToggleButtons(options= ['None'] + [x for x in sklearn.preprocessing.__all__ if x[0].isupper() ] ),
widget.ToggleButtons(options= ['None'] + [x for x in sklearn.decomposition.__all__ if x[0].isupper() ] ),
widget.ToggleButtons(options= ['None', 'UMAP'] + [x for x in sklearn.manifold.__all__ if x[0].isupper() ] )
])
accordion.set_title(0, 'Name of transformer')
accordion.set_title(1, 'Column to be transformed')
accordion.set_title(2, 'Manual input')
accordion.set_title(3, 'Sklearn preprocessing')
accordion.set_title(4, 'Sklearn decomposition')
accordion.set_title(5, 'Sklearn manifold')
accordion.selected_index = None
return accordion
def accordion_to_tuple(self, acc):
if acc.children[-4].value == '': transformer_list = [eval(x.value + '()') for x in acc.children[-3:] if x.value !='None' ]
else: transformer_list = eval('[' + acc.children[-4].value+ ']')
if len(transformer_list) > 0: pipe = make_pipeline( *transformer_list)
else: pipe = Pipeline(steps = [('empty','passthrough')])
self.check = (acc.children[0].value, pipe, tuple(acc.children[1].value))
return (acc.children[0].value, pipe,tuple(acc.children[1].value))
def maketab(self, change):
if self.nColumns.value > len(self.acc_list):
self.acc_list += [self.makeacc() for i in range(self.nColumns.value - len(self.acc_list))]
elif self.nColumns.value < len(self.acc_list):
self.acc_list = self.acc_list[:self.nColumns.value]
self.tab.children = self.acc_list
for num, acc in enumerate(self.acc_list):
self.tab.set_title(num, str(acc.children[0].value))
self.widget = VBox([self.top_box, self.tab])
def Pipe(self):
return clone(self.input_pipe) #Pipeline(steps = [('preprocessing', self.ColumnTransform()), ('classifier', eval(self.classifier.value + '()') )])
def Cache_pipe(self):
self.location = 'cachedir'
self.memory = Memory(location=self.location, verbose=0)
self.cached_pipe = self.Pipe().set_params(memory = self.memory)
def release_cache(self):
self.memory.clear(warn=True)
rmtree(self.location)
del self.memory
def display_app(self):
display(self.widget)
def ColumnTransform(self):
return ColumnTransformer([self.accordion_to_tuple(aco) for aco in self.acc_list])
def export_kwards(self):
return self.Pipe().get_params()
def fit_transform(self):
return self.ColumnTransform().fit_transform(self.df)
def fit_predict(self):
return self.Pipe().fit_predict(self.df, self.df[self.TG])
def fit(self):
return self.Pipe().fit(self.df, self.df[self.TG])
def RFECV(self):
preprocessed_df = pd.DataFrame(self.Pipe()['preprocessing'].fit_transform(self.df))
if self.optimized_pipe[1] == 0:
selector = RFECV(self.Pipe()['classifier'], step=1, cv=KFold(10, shuffle= True)).fit(preprocessed_df, self.df[self.TG])
else:
selector = RFECV(self.optimized_pipe[0]['classifier'], step=1, cv=KFold(10, shuffle= True)).fit(preprocessed_df, self.df[self.TG])
hX = np.array( range(1, len(selector.grid_scores_) + 1))
hY= selector.grid_scores_
H = pd.DataFrame(np.array([hX, hY]).T, columns = ['Number of parameters', 'Cross Validation Score'])
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(hX, hY)
plt.show()
return pd.DataFrame([selector.ranking_, selector.support_], columns = preprocessed_df.columns, index = ['Ranking', 'support'])
def make_skpot_var(self, param, temperature = 3, distribution = 'uniform', just_classifier = False): #'log-uniform'
value = self.export_kwards()[param]
if just_classifier == True: name = param.split('__')[1]
else: name = param
if value == 0 or value ==1: return
if type(value) == int:
if value == -1: return Integer(1, 200, name = name)
lower_bondary = int(value/temperature)
if lower_bondary < 2: lower_bondary = 2
upper_bondary = int(value*temperature) + lower_bondary
#if value <= 1: return Real(1e-3, 1, distribution ,name = name)
return Integer(lower_bondary, upper_bondary, distribution ,name = name)
if type(value) == float:
if value == -1: return Real(1, 200, name = name)
if value <= 1: return Real(1e-3, 1, distribution ,name = name)
lower_bondary = value/temperature
if lower_bondary < 2: lower_bondary = 2
upper_bondary = value*temperature + lower_bondary
return Real(lower_bondary, upper_bondary, distribution ,name = name)
def skopt_classifier_space(self, just_classifier = False):
dic = self.export_kwards()
classifier_params = [x for x in dic.keys()
if x.find('classifier__') != -1
and x.find('silent') == -1
and x.find('n_jobs') == -1
and x.find('bagging_fraction') == -1
and x != 'classifier__subsample'
and x != 'classifier__validation_fraction'] # and
SPACE = [self.make_skpot_var(i, just_classifier = just_classifier) for i in classifier_params]
SPACE = [x for x in SPACE if x if x != None ]
return SPACE
def objective(self, params):
classifier = self.Pipe().set_params(**{dim.name: val for dim, val in zip(self.skopt_classifier_space(), params)})
return -np.mean(cross_val_score(classifier, self.df, self.df[self.TG], cv = StratifiedKFold(n_splits = 5, shuffle=True)))
def objective_just_classifier(self, params, metric , cv_method ):
return -np.mean(cross_val_score(self.cached_pipe['classifier'].set_params(**{dim.name: val for dim, val in zip(self.skopt_classifier_space(just_classifier = 1), params)}),
self.transformed_opt,
self.target_opt,
scoring = metric,
cv = cv_method,
n_jobs = -1))
def objective_cached(self, params):
return -np.mean(cross_val_score(self.cached_pipe.set_params(**{dim.name: val for dim, val in zip(self.skopt_classifier_space(), params)}),
self.df,
self.df[self.TG],
cv = StratifiedKFold(n_splits = 5, shuffle=True)))
def optimize_classifier(self, n_calls = 50, cache = False):
if cache:
self.Cache_pipe()
result = gp_minimize(self.objective_cached, self.skopt_classifier_space() , n_calls=n_calls)
self.release_cache()
else: result = gp_minimize(self.objective, self.skopt_classifier_space() , n_calls=n_calls)
#plot_convergence(result)
#_ = plot_objective(result, n_points=n_calls)
#print(result.fun)
return {'result': result, 'best_params': self.get_params(result, self.skopt_classifier_space() )}
def fast_optimize_classifier(self, n_calls = 50, is_classifier = True):
self.Cache_pipe()
self.transformed_opt = self.cached_pipe['preprocessing'].fit_transform(self.df)
self.target_opt = self.df[self.TG]
if is_classifier:
cv_method = StratifiedKFold(n_splits = 5, shuffle=True)
metric = 'f1_weighted'
else:
cv_method = KFold(n_splits = 5, shuffle=True)
metric = 'r2'
result = gp_minimize(lambda x: self.objective_just_classifier(x, metric, cv_method), self.skopt_classifier_space(just_classifier = True) , n_calls=n_calls)
self.release_cache()
best_params = self.get_params(result, self.skopt_classifier_space(just_classifier = True))
best_params = {'classifier__'+ i[0]:i[1] for i in best_params.items()}
self.optimized_pipe = (self.Pipe().set_params(**best_params), 1)
return {'result': result, 'best_params':best_params}
def get_params(self, result_object, space):
try:
return { i.name: result_object.x[num] for num, i in enumerate(space) }
except:
raise
def Vis_Cluster(self, method):
transformed = self.Pipe()['preprocessing'].fit_transform(self.df)
classsification = method.fit_predict(transformed) #(*args, **kwds)
end_time = time.time()
palette = sns.color_palette('deep', np.unique(classsification).max() + 1)
colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in classsification]
plt.scatter(transformed.T[0], transformed.T[1], c=colors, s = MinMaxScaler(feature_range=(30, 300)).fit_transform(self.df[self.TG].values.reshape(-1, 1)) , **{'alpha' : 0.5, 'linewidths':0})
frame = plt.gca()
for num, spine in enumerate(frame.spines.values()):
if num == 1 or num == 3: spine.set_visible(False)
plt.title('Clusters found by {}'.format(str(method)), fontsize=24)
plt.show()
return
def Evaluate_model(self):
tprs = []
aucs = []
prd = []
tru = []
mean_fpr = np.linspace(0, 1, 100)
X = self.df.copy()
y = self.df[self.TG]
if self.optimized_pipe[1] == 0: clf = self.Pipe()
else: clf = self.optimized_pipe[0]
fig, ax = plt.subplots(1, 2, figsize = (20,10))
try:
for i, (train, test) in enumerate(StratifiedKFold(n_splits=5, shuffle=True).split(X, y)):
clf.fit(X.iloc[train], y.iloc[train])
viz = plot_roc_curve(clf, X.iloc[test], y.iloc[test],
name='ROC fold {}'.format(i),
alpha=0.3, lw=1, ax=ax[0])
interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(viz.roc_auc)
ax[0].plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax[0].plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax[0].fill_between(mean_fpr, tprs_lower, tprs_upper, color='steelblue', alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax[0].set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
# title="Receiver operating characteristic example")
ax[0].legend(loc="lower right")
except: print('non-binary classifier')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
try:
plot_confusion_matrix(clf.fit(X_train, y_train), X_test, y_test,
display_labels=['negative detection', 'positive detection'],
cmap=plt.cm.Blues, ax = ax[1])
ax[1].grid(False)
except: print('is it a regressor?')
fig.tight_layout()
try:
report = classification_report(clf.predict(X_test), y_test, output_dict=True) # target_names=['Negative detection', 'Positive detection']
except: #### report for regression
if self.optimized_pipe[1] == 0: clf = self.Pipe()
else: clf = self.optimized_pipe[0]
report = cross_validate(clf, X, y, cv=5, scoring=('neg_mean_absolute_percentage_error','r2','explained_variance', 'max_error', 'neg_mean_absolute_error', 'neg_mean_squared_error'))
fig, ax = plt.subplots(1, 1, figsize = (1,1))
return report, fig
def named_preprocessor(self):
naming_features = []
for transformer in self.Pipe()['preprocessing'].transformers:
transformed = ColumnTransformer(transformers = [transformer]).fit_transform(self.df)
if transformed.shape[1] == len(transformer[2]):
naming_features += list(transformer[2])
else:
naming_features += [transformer[0] +'__'+ str(i) for i in range(transformed.shape[1]) ]
if self.optimized_pipe[1] == 0: clf = self.Pipe()
else: clf = self.optimized_pipe[0]
return pd.DataFrame(clf['preprocessing'].fit_transform(self.df), columns = naming_features)
def Shapley_feature_importance(self):
if self.optimized_pipe[1] == 0: clf = self.Pipe()
else: clf = self.optimized_pipe[0]
shap.initjs()
dat_trans = self.named_preprocessor()
explainer = shap.TreeExplainer(clf['classifier'].fit(dat_trans, self.df[self.TG])) #,feature_perturbation = "tree_path_dependent"
shap_values = explainer.shap_values(dat_trans)
#### force-plot
        a = [_force_plot_htmlsm(explainer.expected_value[i], shap_values[i], dat_trans) for i in range(len(shap_values))]
#### heatmap
#try: hmap = shap.TreeExplainer(clf['classifier'].fit(dat_trans, self.df[self.TG]), dat_trans) #redo check additivity
#except:
# print('Failed in heatmap, using LGBMC instead')
# hmap = shap.TreeExplainer(LGBMClassifier().fit(dat_trans, self.df[self.TG]), dat_trans)
#fig, ax = plt.subplots(1,1, figsize=(15, 15))
#shap.plots.heatmap(hmap(dat_trans)) ### figure is fig
### dependence matrix
ivalues = explainer.shap_interaction_values(dat_trans)
figdm, axdm = plt.subplots(len( dat_trans.columns), len(dat_trans.columns), figsize=(15, 15))
d = {i: name for i,name in enumerate(dat_trans.columns)}
for i in d.keys():
for j in d.keys():
shap.dependence_plot((d[i], d[j]), ivalues[1], dat_trans, ax = axdm[i,j], show = False)
### dependence plots
#figdp, axdp = plt.subplots( len(dat_trans.columns)//4+1, 4, figsize=(15, 15))
#for num, col in enumerate(dat_trans.columns):
# shap.dependence_plot(col, shap_values[1], dat_trans, ax = axdp[num//4,num%4], show= False)
return (a, figdm) #fig,
cyto.load_extra_layouts()
height, width = [500,500]
canvas_width = 500
canvas_height = round(height * canvas_width / width)
scale = canvas_width / width
def plotly_cyt(d):
edges = [{'data': {'weight': i['data']['weight'], 'source': str(i['data']['source']), 'target': str(i['data']['target'])}} for i in d['edges']]
nodes = [{'data': {k:i['data'][k] for k in ('id', 'value', 'name') }, 'position' : dict(zip(('x', 'y'),i['data']['data']))} for i in d['nodes']]
return nodes + edges
def plotly_cyt2(G):
d = nx.cytoscape_data(G)['elements']
pos = nx.spring_layout(G)
edges = [{'data': {'weight': i['data']['weight'], 'source': str(i['data']['source']), 'target': str(i['data']['target'])}} for i in d['edges']]
nodes = [{'data': {k:i['data'][k] for k in ('id', 'value', 'name') }, 'position' : dict(zip(('x', 'y'),j))} for i,j in zip(d['nodes'], list(pos.values()))]
return nodes + edges
def plotly_cyt3(G):
d = nx.cytoscape_data(G)['elements']
pos = nx.spring_layout(G)
edges = [{'data': {'weight': i['data']['weight'], 'source': str(i['data']['source']), 'target': str(i['data']['target'])}} for i in d['edges']]
nodes = [{'data': {**{k:i['data'][k] for k in ('id', 'value', 'name') }, **{'degree': degree[1]}} , 'position' : dict(zip(('x', 'y'),j))}
for i,j,degree in zip(d['nodes'], list(pos.values()), list(G.degree))]
return nodes + edges
def make_colormap_clustering(column, palette, continuous, data):
if not continuous:
lut = dict(zip(sorted(data[column].unique()), sns.color_palette(palette, len(data[column].unique()))))
else: lut = sns.color_palette(palette, as_cmap=True)
return data[column].map(lut)
def _force_plot_html(*args):
force_plot = shap.force_plot(*args, matplotlib=False, figsize=(18, 18))
shap_html = f"<head>{shap.getjs()}</head><body>{force_plot.html()}</body>"
return html.Iframe(srcDoc=shap_html, height='1800', width='1800',style={"border": 0})#
def mplfig2html(figure):
pic_IObytes2 = io.BytesIO()
figure.savefig(pic_IObytes2, format='png')
figure.clear()
pic_IObytes2.seek(0)
return html.Img(src ='data:image/png;base64,{}'.format(base64.b64encode(pic_IObytes2.read()).decode()))
def mpl2plotlyGraph(figure):
    from plotly import tools as ptools  # local import; 'ptools' is not defined elsewhere in this file
    return dcc.Graph(figure=ptools.mpl_to_plotly(figure))  # image_height: int=600, image_width: int=800
# Build App
app = JupyterDash(__name__, external_stylesheets=[dbc.themes.MINTY]) #FLATLY, LUMEN, SUPERHERO
#server = app.server add this for binder
def convert2cytoscapeJSON(G):
    import json  # local import: json is never imported at module level in this file
    # load all nodes into nodes array
final = {}
final["nodes"] = []
final["edges"] = []
for node in G.nodes():
nx = {}
nx["data"] = {}
nx["data"]["id"] = node
nx["data"]["label"] = node
final["nodes"].append(nx.copy())
#load all edges to edges array
for edge in G.edges():
nx = {}
nx["data"]={}
nx["data"]["id"]=edge[0]+edge[1]
nx["data"]["source"]=edge[0]
nx["data"]["target"]=edge[1]
final["edges"].append(nx)
return json.dumps(final)
upload_tab = [
dbc.Row(dbc.Col(dbc.Jumbotron([
html.H1("qPCR files", className="display-3"),
html.P('We are expecting csv files from an export file from a cfx96',className="lead",),
html.Hr(className="my-2"),
dbc.Row([
dbc.Col(html.H4("Column of qPCR files to merge with habitat metadata:") , width = 4),
dbc.Col(dcc.Dropdown(options = [{"label": "Sample", "value": 'Sample'}] , value = 'Sample', id='qpcrdf', disabled = True), width = 3)]),
dcc.Upload(id='upload-qPCR2',children=html.Div(['Drag and Drop or ', html.A('Select Files')]),
style={'width': '100%',
'height': '120px',
'lineHeight': '120px',
'borderWidth': '2px',
'borderStyle': 'dashed',
'font-size': '20px',
'borderRadius': '5px',
'justify-content': 'center',
'textAlign': 'center',
'margin': '10px'}, multiple=True),
html.Div(id='qpcr-data-upload') ]), width = 12),justify="center",no_gutters=True),
dbc.Row(dbc.Col(dbc.Jumbotron([
html.H1("Habitat metadata", className="display-3"),
html.P('You probably have a separate file with Lat, Lon and other environmental parameters',className="lead",),
html.Hr(className="my-2"),
dbc.Row([
dbc.Col(html.H4("Column of Habitat metadata file to merge with qPCRs:") , width = 4),
dbc.Col(dcc.Dropdown(id='habitatdf'), width = 3)]),
dcc.Upload(id='upload-habitat',children=html.Div(['Drag and Drop or ', html.A('Select Files')]),
style={'width': '100%',
'height': '120px',
'lineHeight': '120px',
'borderWidth': '2px',
'borderStyle': 'dashed',
'borderRadius': '5px',
'font-size': '20px',
'justify-content': 'center',
'textAlign': 'center',
'margin': '10px'},multiple=True),
html.Div(id='habitat-data-upload') ]), width = 12),justify="center",no_gutters=True),
dbc.Row(dbc.Col(dbc.Jumbotron([
html.H1("Send complete dataset directly", className="display-3"),
dcc.Upload(id='upload_dataset_directly',children=html.Div(['Drag and Drop or ', html.A('Select Files')]),
style={'width': '100%', 'height': '120px', 'lineHeight': '120px', 'font-size': '20px', 'borderWidth': '2px', 'borderStyle': 'dashed', 'borderRadius': '5px', 'textAlign': 'center', 'margin': '10px'},multiple=False),
html.Div(id='direct_dataframe_upload_name')
]), width = 12),justify="center",no_gutters=True)
]
merge_tab = [
dbc.Jumbotron([
html.H1("Merged dataset overview ", className="display-3"),
html.P('Look for parameters that have unexpected behavior, dataset size and other possible concerns with data integrity',className="lead",),
html.Hr(className="my-2"),html.P(""),
dcc.Loading(id="loading-1",type="default", children=html.Div(id='Merged_df', style = {'justify-content': 'center', 'margin': '0 auto', 'width': '90%'} ) )
]),
]
VIS = [dbc.Row(dbc.Col(html.Div( id = 'keplermap', style = {'overflow': 'hidden'}), width="100%",style = {'overflow': 'clip'}), no_gutters=True,justify="center", style = {'overflow': 'hidden'}),]
kep_tab=[ dbc.Row([
dbc.Col(
[dbc.Row([
dbc.Jumbotron([
html.H4("what are the continous columns for the UMAP?", id = 'kep_tab_continuous_columns_target'),
dbc.Popover([ dbc.PopoverHeader("how we look at continuous data"),dbc.PopoverBody("https://umap-learn.readthedocs.io/en/latest/basic_usage.html")],target="kep_tab_continuous_columns_target",trigger="hover",),
dcc.Dropdown(options=[],value=[], multi=True, id = 'UMAP_cont'),
html.H4("what are the categorical columns for the UMAP?", id = 'kep_tab_cat_columns_target'),
dbc.Popover([ dbc.PopoverHeader("how we look at categorical data"),dbc.PopoverBody("see https://umap-learn.readthedocs.io/en/latest/composing_models.html#diamonds-dataset-example")],target="kep_tab_cat_columns_target",trigger="hover",),
dcc.Dropdown(options=[],value=[], multi=True, id = 'UMAP_cat'),
html.H4("Do you want to fit the UMAP to a feature?", id = 'keep_tab_metric_learn'), #https://umap-learn.readthedocs.io/en/latest/supervised.html
dbc.Popover([ dbc.PopoverHeader("fitting umap to feature"),dbc.PopoverBody("https://umap-learn.readthedocs.io/en/latest/supervised.html")],target="keep_tab_metric_learn",trigger="hover",),
dcc.Dropdown(options=[],value=[], multi=False, id = 'UMAP_y'),
html.H4("How many neighboors for the UMAP to use?", id = 'keep_tab_nneighboors'),
dbc.Popover([ dbc.PopoverHeader("n neighboors parameter"),dbc.PopoverBody("This parameter controls how UMAP balances local versus global structure in the data. It does this by \
constraining the size of the local neighborhood UMAP will look at when attempting to learn the manifold structure of the data. \
This means that low values of n_neighbors will force UMAP to concentrate on very local structure (potentially to the detriment of the big picture),\
while large values will push UMAP to look at larger neighborhoods of each point when estimating the manifold structure of the data, \
losing fine detail structure for the sake of getting the broader of the data. _ see https://umap-learn.readthedocs.io/en/latest/parameters.html#n-neighbors")],target="keep_tab_nneighboors",trigger="hover",),
dbc.Input(id="n_neighboors", type="number", value = 15, min = 10, max = 1000), #https://umap-learn.readthedocs.io/en/latest/parameters.html#n-neighbors
html.H4('Type of scaling to use:', id= 'kep_tab_scale'),
dbc.Popover([ dbc.PopoverHeader("Should I scale my data?"),dbc.PopoverBody("The default answer is yes, but, of course, the real answer is “it depends”. \
If your features have meaningful relationships with one another (say, latitude and longitude values) then normalising per feature is not a good idea. \
For features that are essentially independent it does make sense to get all the features on (relatively) the same scale. \
The best way to do this is to use pre-processing tools from scikit-learn. All the advice given there applies as sensible preprocessing for UMAP,\
and since UMAP is scikit-learn compatible you can put all of this together into a scikit-learn pipeline.")],target="kep_tab_scale",trigger="hover",),
dbc.RadioItems(id="UMAP_radio",
options=[
{"label": "No Standardization", "value": 1},
{"label": "Standard scaler", "value": 2},
{"label": "Pipeline from machine learning tab","value": 3}],value = 2,
labelCheckedStyle={"color": "#223c4f", 'font-size': '18px'},
labelStyle = {}, style = {'font-size': '18px', 'margin' : '10px', 'margin-left': '60px' ,'transform':'scale(1.2)'}, switch=True,
inputStyle = { }
),
dbc.Button("Generate UMAP", color="info", size = 'lg', className="mr-1", block=True, id='UMAP_start') ]),
dbc.Popover([ dbc.PopoverHeader("what is UMAP?"),dbc.PopoverBody("see https://umap-learn.readthedocs.io/en/latest/how_umap_works.html \nhttps://umap-learn.readthedocs.io/en/latest/scientific_papers.html\nhttps://umap-learn.readthedocs.io/en/latest/faq.html#what-is-the-difference-between-pca-umap-vaes")],target="UMAP_start",trigger="hover",),
])],width=2) ,
dbc.Col([dcc.Loading(id="loading-umap",type="default", children= dcc.Tabs([
dcc.Tab(label = 'umap-view', children = [html.Div(dcc.Graph(id='UMAP_view'), style = {'height': '1200px', 'width' : '1500px','margin-left':'30px'}),html.Div( id = 'umap_selected_stats', style = {'width': '98%'})] ),
dcc.Tab(label = 'heatmap/cytoscape', children = html.Div( id = 'cytoscape', style = {'justify-content': 'center'} )),
dcc.Tab(label = 'hdbscan clustering', children = html.Div(id='graph') ),
], style = {'justify-content': 'center', 'width': '100%','margin-left': '12px','overflow': 'clip'})) ], width=10, style = {'overflow': 'clip'})], no_gutters=True)] #
#className="nav nav-pills" , no_gutters=True autosize=False
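# --- Hedged illustration (not wired into the app) --------------------------------
# The n_neighbors trade-off described in the kep_tab popover above, on toy data:
# small values emphasize local structure, large values the broader picture.
# The function and its defaults are illustrative only.
def _umap_neighbors_demo(n_points=200, n_features=8):
    X = np.random.rand(n_points, n_features)
    local_view = UMAP(n_neighbors=10).fit_transform(X)
    global_view = UMAP(n_neighbors=100).fit_transform(X)
    return local_view, global_view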
time_series_tab = [
dbc.Row([
dbc.Col( dbc.Jumbotron([
html.H4("Target column"),
dcc.Dropdown(options=[],value=[], multi=False, id = 'prophet_y'),
html.H4("Datetime column"),
dcc.Dropdown(options=[],value=[], multi=False, id = 'prophet_ds'),
html.Hr(style= {'margin-bottom': '25px'}),
html.H4("Additional regressors"),
dcc.Dropdown(options=[],value=[], multi=True, id = 'prophet_regressors'),
html.Hr(style= {'margin-bottom': '25px'}),
html.H4('Rolling average'),
html.H5('number of days'),
dbc.Input(id="prophet_rolling_average", type="number", value = 0, min = 0, max = 366, step = 0.25),
html.Hr(style= {'margin-bottom': '25px'}),
html.H4("Growth"),
dcc.Dropdown(options=[
{"label": "logistic", "value": 'logistic'},
{"label": "flat", "value": 'flat'},
{"label": "linear", "value": 'linear'}
],value='linear', multi=False,id = 'prophet_growth'),
html.H4("Target maximum value"),
dbc.Input(id="prophet_cap", type="number", value = 1, step = .01),
html.H4("Target minimum value"),
dbc.Input(id="prophet_floor", type="number", value = 0, step = .01),
html.Hr(style= {'margin-bottom': '25px'}),
            html.H4('Seasonality'),
html.H5('frequency'),
dbc.Checklist( options = [
{"label": "Yearly", "value": 'yearly_seasonality'},
{"label": "Weekly", "value": 'weekly_seasonality'},
{"label": "Daily", "value": 'daily_seasonality'},
] ,value=['yearly_seasonality'], id = 'prophet_seasonality' ,
style = {'font-size': '18px', 'margin' : '10px', 'margin-left': '60px' ,'transform':'scale(1.2)'}, switch=True ),
html.H5('mode'),
dcc.Dropdown(options=[
{"label": "additive", "value": 'additive'},
{"label": "multiplicative", "value": 'multiplicative'}
], multi=False,id = 'seasonality_mode', value = 'additive'),
html.H5('scale'),
dbc.Input(id="season_prior", type="number", value = 10, min = 1, max = 100),
html.Hr(style= {'margin-bottom': '25px'}),
html.H4('Change points'),
html.H5('quantity'),
dbc.Input(id="prophet_n_change_points", type="number", value = 25, min = 0, max = 100,step =1),
html.H5('scale'),
dbc.Input(id="changepoint_prior", type="number", value = .05, min = 0, max = 10., step = 0.01),
html.H5('range'),
dbc.Input(id="changepoint_range", type="number", value = .8, min = 0.1, max = 1., step = 0.01),
]), width = 2),
dbc.Col(dcc.Loading(id="loading-prophet",type="default", children=html.Div(id='prophet_plots', style = {'justify-content': 'center', 'margin': '0 auto', 'width': '100%'} ), style= {'margin-top': '100px'})),
dbc.Col( dbc.Jumbotron([
html.H4('Forecast'),
html.H5('prediction range'),
dcc.DatePickerRange(id= 'prophet_future_dates', display_format='MMM DD YYYY'),
html.Hr(style= {'margin-bottom': '50px'}),
html.H5('remove these month'),
dcc.Dropdown(options=[ {"label": calendar.month_name[num], "value": num} for num in range(1,12)],value=[], multi=True,id = 'prophet_remove_months'),
html.H5('remove these days of the week'),
dcc.Dropdown(options=[ {"label": day_name, "value": num} for num,day_name in enumerate(['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])],
value=[], multi=True,id = 'prophet_remove_days_of_the_week'),
html.H5('remove these hours of the day'),
dcc.Dropdown(options=[ {"label": str(num)+':00-'+str(num+1)+':00', "value": num} for num in range(0,24)],value=[], multi=True,id = 'prophet_remove_hours'),
html.Hr(style= {'margin-bottom': '70px'}),
dbc.Button("Run forecast", color="info", size = 'lg', className="mr-1", block=True, id='run_prophet')
]) , width = 2)
], no_gutters=True, style={'margin-bottom': '10px'})
]
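# --- Hedged illustration (not called by the app) ----------------------------------
# How the time-series controls above typically map onto a Prophet model; the input
# frame must use Prophet's required 'ds'/'y' column names. Values mirror the UI defaults.
def _prophet_demo(df_ts, periods=30):
    m = Prophet(growth='linear',
                yearly_seasonality=True,
                seasonality_mode='additive',
                seasonality_prior_scale=10,      # 'scale' input under Seasonality
                n_changepoints=25,               # 'quantity' input under Change points
                changepoint_prior_scale=0.05,    # 'scale' input under Change points
                changepoint_range=0.8)           # 'range' input under Change points
    m.fit(df_ts)
    future = m.make_future_dataframe(periods=periods)
    return m.predict(future)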
transformers = [x for x in sklearn.preprocessing.__all__ + ['UMAP'] + sklearn.decomposition.__all__ + sklearn.manifold.__all__ if x[0].isupper() and x != 'SparseCoder'] + ['passthrough']
transformer_options = [ {'label': x, 'value': x } for x in transformers] # eval(x+ '()')
ML_tab = [
dbc.Row([
dbc.Col(
[dbc.Jumbotron([
dbc.Row([
dbc.Col([ html.H4("number of transformers:")]),
dbc.Col([#dcc.Dropdown(options=[ {'label': str(x), 'value': str(x)} for x in range(10)],value='2', multi=False,clearable=False, id = 'n_tabs')
dbc.Input(id="n_tabs", type="number", value = 2, min = 1, max = 10)
]),
dbc.Col([html.H4("Target:")]),
dbc.Col([dcc.Dropdown(options=[],value=[], multi=False, id = 'ML_target',clearable=False)]),
dbc.Col([html.H4("Classifier:", id = 'ml_tab_classifier'), dbc.Popover([ dbc.PopoverHeader("chosing a classifier"),dbc.PopoverBody('see: \
https://scikit-learn.org/stable/supervised_learning.html#supervised-learning\n https://lightgbm.readthedocs.io/en/latest/Quick-Start.html ')],target="ml_tab_classifier",trigger="hover",)]),
dbc.Col([dcc.Dropdown(options=[ {'label': x, 'value': x} for x in ['LGBMClassifier', 'LGBMRegressor'] + sklearn.ensemble.__all__ + sklearn.linear_model.__all__]
,value = 'RandomForestClassifier', multi=False, id = 'clf_disp', clearable=False)]) ])]),
dbc.Jumbotron([dbc.Row([dbc.Col(
[html.H4("Columns to be transformed:")] +
[ dcc.Dropdown(options= ['0'], value = ['0'],multi=True,clearable=False, id = 'Columns_'+ str(i)) for i in range(3)], id = 'preprocessing_columns'),
dbc.Col(
[html.H4("Column transformers:", id = 'ml_tab_column_trans')] + #https://scikit-learn.org/stable/modules/preprocessing.html#
[ dcc.Dropdown(options= transformer_options, value = ['passthrough'], multi=True,clearable=False, id = 'ColumnTransformer_'+ str(i)) for i in range(3)], id = 'preprocessing_functions'),
dbc.Popover([ dbc.PopoverHeader("preprocessing the data"),dbc.PopoverBody("see:\n https://scikit-learn.org/stable/modules/preprocessing.html\n\
https://scikit-learn.org/stable/modules/decomposition.html#decompositions#\nhttps://scikit-learn.org/stable/modules/clustering.html#clustering")],target="ml_tab_column_trans",trigger="hover",)
])])
],width=6, id='ml_user_input'), ] + [dbc.Col([dbc.Button("Update Pipeline", color="info", size = 'lg', className="mr-1", block=True, id='submit_pipe'),
html.Div(id = 'show_pipeline', style ={'width': '50%','borderWidth': '0px' ,'border': 'white'})], width = 6)], no_gutters=True,justify="center"),
dbc.Row([dbc.Col(
dbc.Jumbotron([
dbc.Row([ html.H1("Testing the pipeline", style ={'margin': '20px'})]), #,justify="center"
dbc.Row([dbc.Col([html.H4("Number of runs for hyperparameter optimization:", id = 'ml_tab_tunning')], width = 3),
dbc.Popover([ dbc.PopoverHeader("Tunning the model"),dbc.PopoverBody("here we use scikit optimize's bayesian optimization to tune the hyperparameters\
https://scikit-optimize.github.io/stable/auto_examples/bayesian-optimization.html")],target="ml_tab_tunning",trigger="hover",),
dbc.Col([dbc.Input(id="slider_hyperopt", type="number", value = 50, min = 10, max = 1000)], width = 1)], no_gutters=True, style={'margin-bottom': '10px'}), #
dbc.Row([dbc.Button("Run pipeline", color="info", size = 'lg', className="mr-1", block=True, id='run_ML')]),
dcc.Loading(id="loading-ml",type="default", children=html.Div(id = 'ml_results', style = {'justify-content': 'center', 'margin': '0 auto', 'width': '2200', 'height' : '1400px'}),
style= {'margin-top': '-300px','justify-content': 'center'})])
, width = 12, style = {'justify-content': 'center', 'height' : '2000px'}) ], no_gutters=True)
]
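# The transformer/column dropdowns above are presumably assembled into an sklearn
# pipeline along these lines (a sketch, not the app's actual callback; the column
# names are illustrative and eval() mirrors the hint left in transformer_options):
#   from sklearn.compose import ColumnTransformer
#   from sklearn.pipeline import Pipeline
#   ct = ColumnTransformer([('t0', eval('StandardScaler()'), ['col_a']),
#                           ('t1', 'passthrough', ['col_b'])])
#   pipe = Pipeline([('pre', ct), ('clf', eval('RandomForestClassifier()'))])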
# html.Iframe(srcDoc = ret_map._repr_html_().decode(), height='1280', width='2350') iframe for html representation of pipeline sklearn
tab_style = {
"background": "#223c4f",
'color': "#6cc3d5",
'text-transform': 'lowercase',
'border': '#223c4f',
'font-size': '12px',
'font-weight': 200,
'align-items': 'center',
'justify-content': 'center',
'border-radius': '0px',
#'padding':'6px'
}
tab_selected_style = {
"background": "#153751",
'color': 'white',
'text-transform': 'uppercase',
'font-size': '12px',
'font-weight': 200,
'align-items': 'center',
'justify-content': 'center',
#'box-shadow': '60px 0 #223c4f, -60px 0 solid #223c4f',
'border-style': 'solid #223c4f',
'border-color': '#223c4f',
'border-width': '0',
#'border-radius': '50px'
}
app.layout = html.Div([
    dbc.NavbarSimple([], brand = 'SARS-CoV-2 genome viewer', brand_style ={'color': "white",'font-size': '14px'} ,
style = { 'align-items': 'left','justify-content': 'left', 'font-size': '14px', 'height': '40px'},
color = "#223c4f"),
dcc.Store(id='all_qPCR_concat', storage_type='memory'), #storage_type='local'
dcc.Store(id='habitatcsv', storage_type='memory'), #df_with_umap
dcc.Store(id='df', storage_type='memory'),
dcc.Store(id='df_with_umap', storage_type='memory'),
dcc.Store(id='umap_select_columns', storage_type='memory'),
dcc.Store(id='selected_points_umap', storage_type='memory'), #html.Table(id='all_dfs') selected_points_umap
dcc.Tabs([
dcc.Tab(label = 'Dataset', children = upload_tab , style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label = 'Quality Control', children = merge_tab , style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Exploratory Data Analysis', children=kep_tab, style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Geoposition', children=VIS, style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Time Series', children=time_series_tab, style=tab_style, selected_style=tab_selected_style),
dcc.Tab(label='Machine Learning', children=ML_tab, style=tab_style, selected_style=tab_selected_style)],className="nav nav-pills") ,
])
def get_cq_list(x, df):
a = df[df.Sample == x].Cq.values
return [60 if np.isnan(x) else x for x in a]
def get_det_list(x, df):
a = df[df.Sample == x].Call.values
return [1 if x=='(+) Positive' else 0 for x in a]
def FIND_Better(row, column, df):
series = df[df.index == str(row['SiteID'])][column]
if series.shape[0] == 0: return -1
return series.iloc[0]
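# Illustrative sketch of the frame layout the three helpers above expect (never
# called; the tiny frame, its values, and the helper name are hypothetical):
def _example_qpcr_helpers():
    demo = pd.DataFrame({'Sample': ['S1', 'S1', 'S2'],
                         'Cq': [21.4, np.nan, 33.0],
                         'Call': ['(+) Positive', '(-) Negative', '(+) Positive']})
    cqs = get_cq_list('S1', demo)     # -> [21.4, 60]  (NaN Cq is mapped to 60)
    calls = get_det_list('S1', demo)  # -> [1, 0]      (only '(+) Positive' maps to 1)
    return cqs, calls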
cyto.load_extra_layouts()
@app.callback(Output(component_id= 'Merged_df', component_property ='children'),
Output(component_id= 'df', component_property ='data'),
Output(component_id= 'direct_dataframe_upload_name', component_property = 'children'),
Input('habitatdf', 'value'),
Input('upload_dataset_directly', 'contents'),
State('upload_dataset_directly', 'filename'),
State('upload_dataset_directly', 'last_modified'),
State('all_qPCR_concat', 'data'),
State('habitatcsv', 'data'))
def merge_csv_update_spreadsheet(hab, up_content, up_filename, up_date , df_qpcr_json, df_hab_json): #qpcr,
ctx = dash.callback_context.triggered[0]['prop_id'].split('.')[0]
if hab != None and ctx == 'habitatdf': # and qpcr != None:
try : left_merge, right_merge = hab, 'Sample' #qpcr
except:
return html.Div(), html.Hr(className="my-2"), html.Div(),
try: df, df_hab = | pd.read_json(df_qpcr_json) | pandas.read_json |
import numpy as np
import itertools
import pandas as pd
from keras.optimizers import Adam, SGD
from environment import Env
from agents import AgentReinforce
from train import train_reinforce
from simulate import portfolio_safe, portfolio_myopic, portfolio_risky, \
portfolio_reinforce
from helpers import all_close
# suppress scientific notation for numpy and pandas printouts:
np.set_printoptions(suppress=True)
pd.options.display.float_format = '{:5,.5f}'.format
if __name__ == '__main__':
# HYPERPARAMETERS #
# algorithm setup:
train_episodes = 100000
eval_episodes = 10000
pi_update = 1000
# agent variables:
dim_state = 2
dim_actions = 11
hidden_dims = (64, 64, 64)
optimizer = Adam()
gamma = 1.
# environment variables:
start = "random"
tcost = 0.0
horizon = 1
w = 1000.
theta = 1.
mu = np.array([0, 0])
sigma = np.array([0, 1])
    # initialize the RL-agent:
agent = AgentReinforce(dim_state=dim_state,
dim_actions=dim_actions,
hidden_dims=hidden_dims,
optimizer=optimizer,
gamma=gamma)
    # initialize the environment:
env = Env(start=start,
tcost=tcost,
horizon=horizon,
w=w,
theta=theta,
mu=mu,
sigma=sigma)
# TRAINING #
print("\n===TRAINING===\n")
trained_agent, train_loss, train_states, \
train_actions, train_rewards = train_reinforce(agent=agent,
environment=env,
episodes=train_episodes,
policy_update=pi_update)
# SIMULATION #
print("\n===SIMULATION===\n")
fu_safe, alloc_safe, ret_safe = portfolio_safe(
eval_episodes=eval_episodes,
environment=env
)
fu_myopic, alloc_myopic, ret_myopic = portfolio_myopic(
eval_episodes=eval_episodes,
environment=env
)
fu_risky, alloc_risky, ret_risky = portfolio_risky(
eval_episodes=eval_episodes,
environment=env
)
fu_reinforce, alloc_reinforce, ret_reinforce = portfolio_reinforce(
eval_episodes=eval_episodes,
environment=env,
agent=trained_agent
)
# EVALUATION #
print("\n===EVALUATION===\n")
# create start state representation:
start_state = np.array([[0 / env.horizon] + [env.w / env.w]])
actions = | pd.DataFrame(train_actions) | pandas.DataFrame |
# # # # # # # # # # # # # # # # # # # # # # # #
# #
# Module to plot results of #
#     real time contingencies assessment        #
# By: <NAME> #
# 09-08-2018 #
# Version Aplha-0.1 #
# #
# # # # # # # # # # # # # # # # # # # # # # # #
import pandas as pd
import numpy as np
import calendar
import matplotlib.pyplot as plt
import seaborn as sns
from math import pi
from scipy import stats
import matplotlib.ticker as ticker
color_limits = ['#259B00','#CFE301','#FF5733','#900C3F']
hi_limits = [0.25,0.5,0.75,1]
cum_ens_pof = [0.85,0.95,0.99]
warn_colors =['#259B00','#CFE301','#FF5733','#900C3F','#339900','#99CC33','#FFCC00','#FF9966','#CC3300']
warn_colors = sns.color_palette(warn_colors)
pkmn_type_colors = ['#78C850', # Grass
'#F08030', # Fire
'#6890F0', # Water
'#A8B820', # Bug
#'#A8A878', # Normal
'#A040A0', # Poison
'#F8D030', # Electric
#'#E0C068', # Ground
#'#EE99AC', # Fairy
'#C03028', # Fighting
'#F85888', # Psychic
'#B8A038', # Rock
'#705898', # Ghost
'#98D8D8', # Ice
'#7038F8', # Dragon
]
#-> # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Plot_Security_Opteration(DATA,DAY,S_base,TR,LN,BU):
df = DATA[DATA.Day==DAY]
if not BU:
df = df[(df.Type != 'BUS')]
if not TR:
df = df[(df.Type != 'TR')]
if not LN:
df = df[(df.Type != 'LN')]
x,y,s,c = DF_to_List(df,S_base,BU)
l_plot,ax = Plot_Scater(x,y,s,c,BU,LN)
fig = Scater_Labels(l_plot,ax,S_base,BU=BU,LN=LN)
return fig
def Plot_All_Days_Hour_Data(DF,text,S_base=1,TR=True,LN=True,BU=False,day_list=None):
if day_list==None:
day_list = list(calendar.day_name)
for day in day_list:
#df = DF[DF.Day==day]
#if not BU:
# df = df[(df.Type != 'BUS')]
#if not TR:
# df = df[(df.Type != 'TR')]
#if not LN:
# df = df[(df.Type != 'LN')]
#x,y,s,c = DF_to_List(df,S_base,BU)
#l_plot,ax = Plot_Scater(x,y,s,c,BU,LN)
#Scater_Labels(l_plot,ax,S_base,BU=BU,LN=LN)
        fig = Plot_Security_Opteration(DF,day,S_base,TR,LN,BU)
plt.savefig(text+day+'.pdf', bbox_inches = "tight")
plt.close()
#-> # # # # # # # # # # # # # # # # # # # # # # # # # # #
def DF_to_List(DF,s_base,BU):
x,y,s,c = [],[],[],[]
for n,row in DF.iterrows():
y.append(row['Name'])
x.append(row['Hour'])
if BU:
c.append(abs(1-row['Loading']))
s.append(0.00001*np.power(row['Loading']*4.75,10))
else:
c.append(row['Loading'])
s.append(5*100*row['Load']/s_base)
#print(s)
return x,y,s,c
def Plot_Scater(x,y,s,c,BU,LN=True):
from collections import Counter
#size_y = int(len(Counter(y).keys())/4)
#size_y = int(len(Counter(y).keys())/2)
#fig, ax = plt.subplots(figsize=(10,8))
fig, ax = plt.subplots(figsize=(16,9))
if BU:
#fig, ax = plt.subplots(figsize=(12,12))
plot = ax.scatter(x, y, s=s,alpha=0.85,c=c,cmap='jet', vmin=0,vmax=0.15)
else:
plot = ax.scatter(x, y, s=s,alpha=0.85,c=c,cmap='jet', vmin=15,vmax=135)
return plot,ax
def Scater_Labels(plot,ax,s_base,BU=False,LN=False):
if BU:
legend1 = ax.legend(*plot.legend_elements(num=4,fmt=" {x:.2f}"),loc="upper left", title="$|1-U_{k_{pu}}|$",fontsize=16)
kw = dict(prop="sizes", num=3, color='gray',alpha=0.5, fmt=" {x:.2f}",func=lambda s: np.power(s/0.00001,1/10)/4.75)
ax.legend(*plot.legend_elements(**kw),loc="upper right", title="Voltage-[Pu]",fontsize=16)
#ax.xaxis.set_tick_params(labelsize=18)
ax.xaxis.set_tick_params(labelsize=10)
ax.yaxis.set_tick_params(labelsize=10)
ax.set_xlabel('Time - [h]', fontsize=16)
else:
legend1 = ax.legend(*plot.legend_elements(num=4),loc="upper left", title="Loading-[%]",fontsize=16)
kw = dict(prop="sizes", num=4, color='gray',alpha=0.5, fmt=" {x:.1f}",func=lambda s: s*s_base/(100*5))
ax.legend(*plot.legend_elements(**kw),loc="upper right", title="Load-[MVA]",fontsize=16)
#ax.yaxis.set_tick_params(labelsize=18)
ax.yaxis.set_tick_params(labelsize=10)
ax.set_xlabel('Time - [h]', fontsize=16)
#if LN:
ax.xaxis.set_tick_params(labelsize=10)
ax.add_artist(legend1)
#-> # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Plot_Stack_By_Day(DATA,DAY):
#fig, ax = plt.subplots(figsize=(8,5))
fig, ax = plt.subplots(figsize=(16,9))
df = DATA[DATA.Day==DAY]
df = df.drop(columns="Day")
df_pivot = df.pivot(index='Hour', columns='Name', values='Load')
df_pivot.plot.area(ax=ax)
#->ax.legend(loc='lower center', ncol=7, bbox_to_anchor=(0.5, 1), fontsize='x-small')
ax.legend(loc='lower center', ncol=9, bbox_to_anchor=(0.5, 1))
ax.set(ylabel='Load - [MVA]')
ax.set_xlabel('Time - [h]', fontsize=16)
ax.xaxis.set_tick_params(labelsize=14)
ax.yaxis.set_tick_params(labelsize=12)
plt.xlim(0,df['Hour'].max())
return fig
def Plot_Stack(DF,text,day_list=None):
if day_list==None:
day_list = list(calendar.day_name)
for day in day_list:
#fig, ax = plt.subplots(figsize=(8,5))
#df = DF[DF.Day==day]
#df = df.drop(columns="Day")
#df_pivot = df.pivot(index='Hour', columns='Name', values='Load')
#df_pivot.plot.area(ax=ax)
#ax.legend(loc='lower center', ncol=7, bbox_to_anchor=(0.5, 1), fontsize='x-small')
#ax.set(ylabel='Load - [MVA]')
#ax.set_xlabel('Time - [h]', fontsize=16)
#ax.xaxis.set_tick_params(labelsize=14)
#ax.yaxis.set_tick_params(labelsize=12)
#plt.xlim(0,df['Hour'].max())
fig = Plot_Stack_By_Day(DF,day)
plt.savefig(text+day+'_Load.pdf', bbox_inches = "tight")
plt.close()
#-> # # # # # # # # # # # # # # # # # # # # # # # # # # #
def Plot_Histogram(DF,Asset_List,Type=None):
#->if Type==None:
#-> asset_port = Asset_List.Name.values.tolist()
#->else:
#-> asset_port = Asset_List[Asset_List['Type']==Type]
#-> asset_port = asset_port.Name.values.tolist()
print('Test Test')
asset_port = ['TR_1', 'TR_2', 'TR_3', 'TR_4', 'TR_5', 'TR_6', 'TR_7', 'TR_8', 'TR_9', 'TR_10', 'TR_11', 'TR_12']
data_by_trail = {}
data_asset_by_trail = {}
data_by_trail_1 = {}
for trail in DF['Ite'].unique():
df = DF[DF['Ite']==trail]
data_by_trail_1[trail] = df['Cr'].sum()
if df['Cr'].sum()>0:
data_by_trail[trail] = df['Cr'].sum()
test_dic = {}
for asset in asset_port:
df_a = df[df[asset]==True] # Check if the asset fail
test_dic[asset]= df_a['Cr'].sum()
data_asset_by_trail[trail] = test_dic
df_total = pd.DataFrame.from_dict(data_by_trail, orient='index')
df_by_asset = pd.DataFrame.from_dict(data_asset_by_trail, orient='index')
#for asset in asset_port:
# if df_by_asset[asset].sum()>0:
# sns.distplot(df_by_asset[asset],label=asset, kde=False , bins=50)
#sns.distplot(df_total,label='Portfolio', kde=False, bins=50)
#kwargs = {'cumulative': True}
x = df_total.values
import matplotlib.pyplot as plt
weights = np.ones_like(x)/500
plt.hist(x, weights=weights)
df_total = pd.DataFrame.from_dict(data_by_trail_1, orient='index')
x = df_total.values
plt.hist(x, cumulative = True,alpha=0.25,density=True)
#print(df_total)
#sns.kdeplot(df_total, cumulative=True)
#df_total.hist( cumulative = True )
#x = df_total.values
plt.xlabel('Energy not supplied (ENS) - [MWh]')
plt.legend()
plt.ylabel('Density')
plt.show()
#plt.savefig('Histogram_ENS_Load.pdf', bbox_inches = "tight")
#plt.close()
def Plot_Histogram_ENS(DF,Asset_port,Years,Type=None,Factor='GWh'):
#->if Type==None:
#-> asset_port = Asset_List.Name.values.tolist()
#->else:
#-> asset_port = Asset_List[Asset_List['Type']==Type]
#-> asset_port = asset_port.Name.values.tolist()
data_by_trail = {}
data_dic = {}
for year in Years:
df_by_year = DF[DF.Date.dt.year<=year]
data_by_trail = {}
data_temp = []
for trail in DF['Ite'].unique():
df = df_by_year[df_by_year['Ite']==trail]
data_by_trail[trail] = df['Cr'].sum()
if Factor=='GWh':
data_temp.append(df['Cr'].sum()/1e3)
else:
data_temp.append(df['Cr'].sum())
data_dic[year] = data_temp
df = pd.DataFrame.from_dict(data_dic)
# Plot histogram with seaborn
sns.set_palette("Set1")
fig, ax = plt.subplots()
for year in Years:
#kwargs = {'cumulative': True,'bw':3,'cut':0,'shade':True}
kwargs = {'cumulative': True,'bw':3,'cut':0,'shade':True}
sns.distplot(df[year], hist=False, kde_kws=kwargs,norm_hist=True,kde=True,label=year,ax=ax)
# Show vertical lines
data_x, data_y = ax.lines[-1].get_data()
color = ax.lines[-1].get_c()
for yi in cum_ens_pof: # coordinate where to find the value of kde curve
xi = np.interp(yi,data_y,data_x)
y = [0,yi]
x = [xi,xi]
ax.plot(x, y,color=color,ls='--',linewidth= 1,alpha=0.5) # Vertical line
if year==Years[-1]: # Plot horizontal lines just for the last year - avoid overplotting
y = [yi,yi]
x = [0,xi]
ax.plot(x, y,color='black',ls=':',linewidth= 0.5,alpha=0.75) # Horizontal line
plt.xlabel('Energy not supplied (ENS) - ['+Factor+']')
plt.legend()
plt.ylabel('Density')
plt.xlim(0,df[year].max())
plt.ylim(0,1.05)
plt.savefig('RESULTS/Histogram_ENS_Cumulative_Load.pdf', bbox_inches = "tight")
# Plot histogram with seaborn
for year in Years:
fig, ax1 = plt.subplots(figsize= [4, 3])
kwargs_hist = {'cumulative': False,'bw':0.5,'cut':0}
kwargs = {'cumulative': False,"alpha": 0.75,"linewidth": 1.5,'edgecolor':'#000000'}
sns.distplot(df[year], hist_kws=kwargs, kde_kws=kwargs_hist,norm_hist=True,kde=False,ax=ax1)
plt.xlabel('Energy not supplied (ENS) - ['+Factor+']')
plt.ylabel('Density')
plt.xlim(0,df[year].max())
#plt.savefig('RESULTS/'+str(year)+'_Histogram_ENS_Load.pdf', bbox_inches = "tight",label=year)
plt.savefig('RESULTS/'+str(year)+'_Histogram_ENS_Load.pdf', bbox_inches = "tight")
plt.close()
def Risk_Matrix_ENS(DF,Asset_dF,year_list,N,Type=None,Factor='GWh'):
#->if Type==None:
#-> asset_port = Asset_List.Name.values.tolist()
#->else:
#-> asset_port = Asset_List[Asset_List['Type']==Type]
#-> asset_port = asset_port.Name.values.tolist()
#-> N -> Number of montecarlo trials
data_dic = {}
pof_dic = []
ens = []
ri_dic = []
asset_name = []
year_val = []
mttr_dic = []
a_dic = []
saidi_dic = []
y_beg= min(DF.Date.dt.year)-1
for asset_id in Asset_dF.index:
asset = Asset_dF.loc[asset_id]
for year in year_list:
df_by_year = DF[DF.Date.dt.year<=year]
df_asset = df_by_year[df_by_year[asset.Name]==True] # Check if the asset fail
mttr = asset.MTTR
trials = 24*365*(year-y_beg)*N
D_time = 24*365*(year-y_beg)/mttr # Cumulative years
#lam = D_time*(len(df_asset)/len(df_by_year))
            A = 100*((trials-len(df_asset))/trials) # Availability
lam = D_time*(len(df_asset)/trials)
pof = 1-np.exp(-lam)
pof_dic.append(100*pof)
ens_rms = np.sqrt(np.mean(df_asset['Cr'].values**2))
saidi_rms = np.sqrt(np.mean(df_asset['SAIDI'].values**2))
ens.append(ens_rms)
mttr_dic.append(mttr)
asset_name.append(asset.Name)
year_val.append(year)
ri_dic.append(pof*ens_rms) # Risk index
a_dic.append(A)
saidi_dic.append(saidi_rms)
#A = len(df_asset)/(len(df_by_year))
data_dic['Name'] = asset_name
data_dic['Year'] = year_val
data_dic['POF'] = pof_dic
data_dic['MTTR'] = mttr_dic
data_dic['A'] = a_dic
data_dic['SAIDI'] = saidi_dic
if Factor=='GWh':
data_dic['ENS'] = [n/1000 for n in ens]
data_dic['RI'] = [round(n/1000,2) for n in ri_dic]
else:
data_dic['ENS'] = ens
data_dic['RI'] = ri_dic
df_data = | pd.DataFrame.from_dict(data_dic) | pandas.DataFrame.from_dict |
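# Worked example of the failure-probability step above (a sketch, not part of the
# original module; the mttr/failed_trials values below are hypothetical):
def _example_pof(mttr=8.0, years=1, n_trials=500, failed_trials=43800):
    trials = 24*365*years*n_trials       # hourly Monte Carlo trials, as in Risk_Matrix_ENS
    d_time = 24*365*years/mttr           # repair cycles available in the horizon
    lam = d_time*(failed_trials/trials)  # expected failure count
    return 1 - np.exp(-lam)              # here lam ~= 10.95, so pof ~= 1.0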
from unittest import TestCase
import pandas as pd
import numpy as np
from moonstone.normalization.counts.geometric_mean import (
GeometricMeanNormalization
)
class TestGeometricMeanNormalization(TestCase):
def setUp(self):
data = [
[255, 26, 48, 75],
[366, 46, 78, 0],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
self.dummy_df = pd.DataFrame(data, columns=column_names, index=ind)
def test_check_format(self):
tested_object = GeometricMeanNormalization(self.dummy_df)
pd.testing.assert_frame_equal(tested_object.df, self.dummy_df)
def test_non_zero_df(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=80)
data = [
[255, 26, 48, 75],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind).astype('float')
pd.testing.assert_frame_equal(tested_object.non_zero_df(self.dummy_df), expected_result)
def test_non_zero_df_threshold70(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=70, normalization_level=0)
data = [
[255, 26, 48, 75],
[366, 46, 78, np.nan],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind).astype('float')
pd.testing.assert_frame_equal(tested_object.non_zero_df(self.dummy_df), expected_result)
def test_log_df(self):
input_data = [
[255, 26, 48, 75],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
input_column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
input_ind = ["Gen_1", "Gen_3", 'Gen_4']
input_df = pd.DataFrame(input_data, columns=input_column_names, index=input_ind)
tested_object = GeometricMeanNormalization(self.dummy_df)
data = [
[5.541264, 3.258097, 3.871201, 4.317488],
[6.861711, 5.402677, 3.828641, 4.174387],
[4.488636, 3.988984, 4.976734, 3.367296]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind)
pd.testing.assert_frame_equal(tested_object.log_df(input_df), expected_result)
def test_log_base_n_df(self):
input_data = [
[255, 26, 48, 75],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
input_column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
input_ind = ["Gen_1", "Gen_3", 'Gen_4']
input_df = pd.DataFrame(input_data, columns=input_column_names, index=input_ind)
tested_object = GeometricMeanNormalization(self.dummy_df, log_number=10)
data = [
[2.406540, 1.414973, 1.681241, 1.875061],
[2.980003, 2.346353, 1.662758, 1.812913],
[1.949390, 1.732394, 2.161368, 1.462398]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind)
pd.testing.assert_frame_equal(tested_object.log_df(input_df),
expected_result)
def test_removed_zero_df_None(self):
data = [
[255, 26, 48, 75],
[366, 0, 78, 0],
[955, 0, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
dummy_df = pd.DataFrame(data, columns=column_names, index=ind)
tested_object = GeometricMeanNormalization(dummy_df)
expected_result = None
self.assertEqual(tested_object.removed_zero_df, expected_result)
def test_removed_zero_df(self):
data = [
[255, 26, 48, 75],
[366, 0, 78, 0],
[955, 0, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
dummy_df = pd.DataFrame(data, columns=column_names, index=ind)
tested_object = GeometricMeanNormalization(dummy_df)
data = [
[366, 0, 78, 0],
[955, 0, 46, 65],
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ['Gen_2', "Gen_3"]
expected_result = pd.DataFrame(data, columns=column_names, index=ind)
scaling_factors = tested_object.scaling_factors # noqa
pd.testing.assert_frame_equal(tested_object.removed_zero_df,
expected_result)
def test_calculating_and_substracting_mean_row(self):
input_data = [
[5.541264, 3.258097, 3.871201, 4.317488],
[6.861711, 5.402677, 3.828641, 4.174387],
[4.488636, 3.988984, 4.976734, 3.367296]
]
input_column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
input_ind = ["Gen_1", "Gen_3", 'Gen_4']
input_df = pd.DataFrame(input_data, columns=input_column_names, index=input_ind)
tested_object = GeometricMeanNormalization(self.dummy_df)
data = [
[1.294251, -0.988916, -0.375811, 0.070475],
[1.794857, 0.335823, -1.238213, -0.892467],
[0.283224, -0.216428, 0.771321, -0.838117]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind)
pd.testing.assert_frame_equal(tested_object.calculating_and_substracting_mean_row(input_df).round(6),
expected_result)
def test_scaling_factor_zero_thresh_100(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=100)
data = [3.648263, 0.805390, 0.686732, 0.432524]
ind = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
expected_result = pd.Series(data, index=ind)
pd.testing.assert_series_equal(tested_object.scaling_factors, expected_result)
def test_scaling_factor_zero_thresh_80(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=80)
data = [3.648263, 0.805390, 0.686732, 0.432524]
ind = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
expected_result = pd.Series(data, index=ind)
pd.testing.assert_series_equal(tested_object.scaling_factors, expected_result)
def test_scaling_factor_zero_thresh_70(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=70)
data = [3.495248, 0.612726, 0.699505, 0.432524]
ind = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
expected_result = pd.Series(data, index=ind)
pd.testing.assert_series_equal(tested_object.scaling_factors, expected_result)
def test_scaling_factor_zero_thresh_70_more_zeros(self):
data = [
[255, 26, 48, 75],
[366, 0, 78, 0],
[955, 0, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
more_zero_example = pd.DataFrame(data, columns=column_names, index=ind)
tested_object = GeometricMeanNormalization(more_zero_example, zero_threshold=70)
data = [3.648263, 0.588685, 0.686732, 0.458165]
ind = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
expected_result = pd.Series(data, index=ind)
pd.testing.assert_series_equal(tested_object.scaling_factors, expected_result)
def test_scaling_factor_zero_thresh_80_more_zeros(self):
data = [
[255, 26, 48, 75],
[366, 0, 78, 0],
[955, 0, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
more_zero_example = pd.DataFrame(data, columns=column_names, index=ind)
tested_object = GeometricMeanNormalization(more_zero_example, zero_threshold=80)
data = [2.487833, 0.588685, 1.424677, 0.752771]
ind = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
expected_result = pd.Series(data, index=ind)
pd.testing.assert_series_equal(tested_object.scaling_factors, expected_result)
def test_normalized_df(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=100)
data = [
[69.896271, 32.282490, 69.896271, 173.400644],
[100.321707, 57.115175, 113.581441, 0.000000],
[261.768388, 275.642802, 66.983926, 150.280558],
[24.395169, 67.048249, 211.144986, 67.048249]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_2", "Gen_3", 'Gen_4']
expected_result = | pd.DataFrame(data, columns=column_names, index=ind) | pandas.DataFrame |
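# Sketch of the scaling-factor computation the tests above exercise (assuming the
# DESeq-style median-of-ratios scheme implied by the expected values; the helper
# name is hypothetical and nothing below is collected by the test runner):
def _median_of_ratios_sketch():
    counts = np.array([[255, 26, 48, 75],
                       [366, 46, 78, 0],
                       [955, 222, 46, 65],
                       [89, 54, 145, 29]], dtype=float)
    log_counts = np.log(counts[(counts > 0).all(axis=1)])             # keep genes with no zeros
    log_ratios = log_counts - log_counts.mean(axis=1, keepdims=True)  # subtract row-wise log geometric mean
    return np.exp(np.median(log_ratios, axis=0))                      # ~[3.648, 0.805, 0.687, 0.433]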
"""BLEU SCORE
@author: vasudevgupta
"""
import nltk
import numpy as np
import pandas as pd
class Bleu:
def __init__(self, N=4):
"""GET THE BLEU SCORE
INPUT THE TARGET AND PREDICTION
"""
self.N = N
def get_score(self, target, pred):
ngrams_prec = []
for n in range(1, self.N+1):
precision = self.get_Ngram_precision(target, pred, n)
ngrams_prec.append(precision)
len_target = np.mean([len(targ) for targ in target]) if type(
target[0]) == list else len(target)
        # standard brevity penalty: exp(1 - r/c) when the candidate is shorter than the reference
        len_penalty = 1 if len(pred) >= len_target else np.exp(
            1 - len_target/len(pred))
        self.bleu_scr = len_penalty*(np.prod(ngrams_prec)**(1.0/self.N))
return self.bleu_scr
def get_Ngram_precision(self, target, pred, n):
new_pred = list(nltk.ngrams(pred, n))
count_pred = self._counter(new_pred)
# if there are more than 2 sents in reference
if type(target[0]) == list:
new_target = [list(nltk.ngrams(target[i], n))
for i in range(len(target))]
count_target = [self._counter(new_target[i])
for i in range(len(new_target))]
scores = [[np.min([count_pred[tok], count_target[i][tok]])
if tok in new_target[i] else 0
for tok in count_pred.keys()]
for i in range(len(new_target))]
final_score = np.max(scores, axis=0)
else:
new_target = list(nltk.ngrams(target, n))
count_target = self._counter(new_target)
final_score = [np.min([count_pred[tok], count_target[tok]])
if tok in new_target else 0 for tok in count_pred.keys()]
# just for ensuring that no errors happen
len_pred = len(new_pred) if len(new_pred) > 0 else 1
precisions = np.sum(final_score)/len_pred
return precisions
def _counter(self, ls):
"""Returns a dict with freq of each element in ls
"""
freq = | pd.Series(ls) | pandas.Series |
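# Hedged usage sketch (not executed): how the class above is intended to be called,
# assuming _counter returns a token -> frequency dict as its docstring says.
def _example_bleu():
    reference = ['the', 'cat', 'is', 'on', 'the', 'mat']
    candidate = ['the', 'cat', 'sat', 'on', 'the', 'mat']
    return Bleu(N=4).get_score(reference, candidate)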
"""Utils module."""
import click
import os.path
import pandas as pd
from tensorflow.keras.models import load_model
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard
from zalando_classification.models import build_model
def get_basename(name, split_num):
return f"{name}.split{split_num:d}"
def get_model_filename_fmt(basename):
return f"{basename}.{{epoch:02d}}.h5"
def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,
batch_norm, l1_factor, l2_factor, optimizer):
"""
Attempt to load the specified model (including architecture, weights, and
even optimizer states). If this is not possible, build a new model from
scratch.
"""
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
model_filename = model_filename_fmt.format(epoch=resume_from_epoch)
checkpoint_path = os.path.join(checkpoint_dir, model_filename)
if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):
click.secho(f"Found model checkpoint '{checkpoint_path}'. "
f"Resuming from epoch {resume_from_epoch}.", fg='green')
model = load_model(checkpoint_path)
initial_epoch = resume_from_epoch
else:
click.secho(f"Could not load model checkpoint '{checkpoint_path}' "
"or `resume_from_epoch == 0`. Building new model.",
fg='yellow')
model = build_model(output_dim=1, batch_norm=batch_norm,
kernel_regularizer=l1_l2(l1_factor, l2_factor))
# optimizer = Adam(beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
initial_epoch = 0
return model, initial_epoch
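# Typical call (a sketch; directory names, epoch and optimizer string are hypothetical):
#   model, initial_epoch = maybe_load_model("zalando", split_num=0,
#                                           checkpoint_dir="checkpoints/",
#                                           resume_from_epoch=10, batch_norm=True,
#                                           l1_factor=0.0, l2_factor=1e-4,
#                                           optimizer="adam")
#   model.fit(X_train, y_train, initial_epoch=initial_epoch, ...)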
def build_callbacks(name, split_num, summary_dir, checkpoint_dir,
checkpoint_period):
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
tensorboard_path = os.path.join(summary_dir, basename)
csv_path = os.path.join(summary_dir, f"{basename}.csv")
checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)
callbacks = []
callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))
callbacks.append(CSVLogger(csv_path, append=True))
callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period))
return callbacks
def make_plot_data(names, splits, summary_dir, pretty_name_mapping=None):
df_list = []
for name in names:
for split_num in splits:
basename = get_basename(name, split_num)
csv_path = os.path.join(summary_dir, f"{basename}.csv")
df = | pd.read_csv(csv_path) | pandas.read_csv |
"""Tests suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- <NAME> & <NAME>
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from unittest import TestCase
from datetime import datetime, timedelta
from numpy.ma.testutils import assert_equal
from pandas.tseries.period import Period, PeriodIndex
from pandas.tseries.index import DatetimeIndex, date_range
from pandas.tseries.tools import to_datetime
import pandas.core.datetools as datetools
import numpy as np
from pandas import Series, TimeSeries
from pandas.util.testing import assert_series_equal
class TestPeriodProperties(TestCase):
"Test properties such as year, month, weekday, etc...."
#
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_interval_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEquals(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assert_(i1 != i4)
self.assertEquals(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/12/12', freq='D')
self.assertEquals(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEquals(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEquals(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEquals(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEquals(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEquals(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEquals(i1, i2)
self.assertEquals(i1, i3)
i1 = Period('05Q1')
self.assertEquals(i1, i2)
lower = Period('05q1')
self.assertEquals(i1, lower)
i1 = Period('1Q2005')
self.assertEquals(i1, i2)
lower = Period('1q2005')
self.assertEquals(i1, lower)
i1 = Period('1Q05')
self.assertEquals(i1, i2)
lower = Period('1q05')
self.assertEquals(i1, lower)
i1 = Period('4Q1984')
self.assertEquals(i1.year, 1984)
lower = Period('4q1984')
self.assertEquals(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEquals(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEquals(i1, i2)
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assert_(i1.freq[0] != '1')
i2 = Period('11/30/2005', freq='2Q')
self.assertEquals(i2.freq[0], '2')
def test_to_timestamp(self):
intv = Period('1982', freq='A')
start_ts = intv.to_timestamp(which_end='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEquals(start_ts, intv.to_timestamp(which_end=a))
end_ts = intv.to_timestamp(which_end='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEquals(end_ts, intv.to_timestamp(which_end=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
for i, fcode in enumerate(from_lst):
intv = Period('1982', freq=fcode)
result = intv.to_timestamp().to_period(fcode)
self.assertEquals(result, intv)
self.assertEquals(intv.start_time(), intv.to_timestamp('S'))
self.assertEquals(intv.end_time(), intv.to_timestamp('E'))
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.day_of_year, 1)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.day_of_year, 1)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.day_of_year, 1)
assert_equal(h_date.hour, 0)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.day_of_year, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
        s_date = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.day_of_year, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
def noWrap(item):
return item
class TestFreqConversion(TestCase):
"Test frequency conversion of date objects"
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
        # frequency conversion tests: from Minutely Frequency
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
        # frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
class TestPeriodIndex(TestCase):
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
self.assert_(isinstance(series, TimeSeries))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp('D', 'end')
self.assert_(result.index.equals(exp_index))
self.assertEquals(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-DEC')
result = series.to_timestamp('D', 'start')
self.assert_(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assert_(result.index.equals(exp_index))
def test_constructor(self):
ii = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 9)
ii = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 4 * 9)
ii = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(ii), 12 * 9)
ii = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(ii), 365 * 9 + 2)
ii = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(ii), 261 * 9)
ii = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(ii), 365 * 24)
ii = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(ii), 24 * 60)
ii = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(ii), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assert_((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assert_((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError('Must specify periods if missing start or end')
except ValueError:
pass
def test_shift(self):
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
ii1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
ii1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(1).values, ii2.values)
ii1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
ii2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert_equal(len(ii1), len(ii2))
assert_equal(ii1.shift(-1).values, ii2.values)
def test_asfreq(self):
ii1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
ii2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
ii3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
ii4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
ii5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
ii6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
ii7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
self.assertEquals(ii1.asfreq('Q', 'S'), ii2)
self.assertEquals(ii1.asfreq('Q', 's'), ii2)
self.assertEquals(ii1.asfreq('M', 'start'), ii3)
self.assertEquals(ii1.asfreq('D', 'StarT'), ii4)
self.assertEquals(ii1.asfreq('H', 'beGIN'), ii5)
self.assertEquals(ii1.asfreq('Min', 'S'), ii6)
self.assertEquals(ii1.asfreq('S', 'S'), ii7)
self.assertEquals(ii2.asfreq('A', 'S'), ii1)
self.assertEquals(ii2.asfreq('M', 'S'), ii3)
self.assertEquals(ii2.asfreq('D', 'S'), ii4)
self.assertEquals(ii2.asfreq('H', 'S'), ii5)
self.assertEquals(ii2.asfreq('Min', 'S'), ii6)
self.assertEquals(ii2.asfreq('S', 'S'), ii7)
self.assertEquals(ii3.asfreq('A', 'S'), ii1)
self.assertEquals(ii3.asfreq('Q', 'S'), ii2)
self.assertEquals(ii3.asfreq('D', 'S'), ii4)
self.assertEquals(ii3.asfreq('H', 'S'), ii5)
self.assertEquals(ii3.asfreq('Min', 'S'), ii6)
self.assertEquals(ii3.asfreq('S', 'S'), ii7)
self.assertEquals(ii4.asfreq('A', 'S'), ii1)
self.assertEquals(ii4.asfreq('Q', 'S'), ii2)
self.assertEquals(ii4.asfreq('M', 'S'), ii3)
self.assertEquals(ii4.asfreq('H', 'S'), ii5)
self.assertEquals(ii4.asfreq('Min', 'S'), ii6)
self.assertEquals(ii4.asfreq('S', 'S'), ii7)
self.assertEquals(ii5.asfreq('A', 'S'), ii1)
self.assertEquals(ii5.asfreq('Q', 'S'), ii2)
self.assertEquals(ii5.asfreq('M', 'S'), ii3)
self.assertEquals(ii5.asfreq('D', 'S'), ii4)
self.assertEquals(ii5.asfreq('Min', 'S'), ii6)
self.assertEquals(ii5.asfreq('S', 'S'), ii7)
self.assertEquals(ii6.asfreq('A', 'S'), ii1)
self.assertEquals(ii6.asfreq('Q', 'S'), ii2)
self.assertEquals(ii6.asfreq('M', 'S'), ii3)
self.assertEquals(ii6.asfreq('D', 'S'), ii4)
self.assertEquals(ii6.asfreq('H', 'S'), ii5)
self.assertEquals(ii6.asfreq('S', 'S'), ii7)
self.assertEquals(ii7.asfreq('A', 'S'), ii1)
self.assertEquals(ii7.asfreq('Q', 'S'), ii2)
self.assertEquals(ii7.asfreq('M', 'S'), ii3)
self.assertEquals(ii7.asfreq('D', 'S'), ii4)
self.assertEquals(ii7.asfreq('H', 'S'), ii5)
self.assertEquals(ii7.asfreq('Min', 'S'), ii6)
#self.assertEquals(ii7.asfreq('A', 'E'), i_end)
def test_badinput(self):
self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A')
self.assertRaises(ValueError, Period, -2000, 'A')
self.assertRaises(ValueError, Period, 0, 'A')
self.assertRaises(ValueError, PeriodIndex, [-1, 0, 1], 'A')
self.assertRaises(ValueError, PeriodIndex, np.array([-1, 0, 1]), 'A')
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
ii1 = dti.to_period()
ii2 = dti.to_period(freq='D')
self.assertEquals(ii1[0], Period('Jan 2005', freq='M'))
self.assertEquals(ii2[0], Period('1/31/2005', freq='D'))
self.assertEquals(ii1[-1], Period('Nov 2005', freq='M'))
self.assertEquals(ii2[-1], Period('11/30/2005', freq='D'))
def test_iindex_slice_index(self):
ii = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(ii)), index=ii)
res = s['2010']
exp = s[0:12]
assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
assert_series_equal(res, exp)
def test_iindex_qaccess(self):
ii = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(ii)), index=ii).cumsum()
# Todo: fix these accessors!
self.assert_(s['05Q4'] == s[2])
def test_interval_dt64_round_trip(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='B')
ii = dti.to_period()
self.assert_(ii.to_timestamp().equals(dti))
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='B')
ii = dti.to_period(freq='3H')
self.assert_(ii.to_timestamp().equals(dti))
def test_iindex_multiples(self):
        ii = PeriodIndex(start='1/1/10', end='12/31/12', freq='2M')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from scipy.stats import randint as sp_randint
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
path_train = "model/data_train.csv"
path_test = "model/data_test.csv"
path_mergered = "data/data_merged.csv"
def read_dataset(path):
return pd.read_csv(path)
def to_csv(path,dataframe):
np.savetxt(path, dataframe, delimiter=",")
def report(results, n_top=3):  # Function to display the top-ranked search results
    for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print("Parameters: {0}".format(results['params'][candidate]))
print("")
def random_forest(train, test):
features = train.columns[:13]
x_train = train[features]
y_train = train['attack']
x_test = test[features]
y_test = test['attack']
X, y = x_train, y_train
clf_rf = RandomForestClassifier(n_estimators=10, random_state = 0)
"""
param_dist = {"max_depth": [None],
"max_features": sp_randint(1, 13),
"min_samples_split": sp_randint(2, 95),
"min_samples_leaf": sp_randint(1, 95),
"bootstrap": [True, False], 'class_weight':['balanced'],
"criterion": ["gini", "entropy"]}
random_search = RandomizedSearchCV(clf_rf, scoring= 'f1_micro',
param_distributions=param_dist,
n_iter= 80)
random_search.fit(X, y)
"""
    clf_rf.fit(X, y)  # Build the model
    preds_rf = clf_rf.predict(x_test)  # Evaluate the model on the test data
# report(random_search.cv_results_)
print("Random Forest: \n"
+classification_report(y_true=y_test, y_pred=preds_rf))
    # Confusion matrix
    print("Confusion matrix:\n")
    matriz = pd.crosstab(test['attack'], preds_rf, rownames=['actual'], colnames=['preds'])
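# Added driver sketch (not in the original script): it assumes the CSV files at
# path_train/path_test exist and already contain the 13 feature columns plus
# the 'attack' label column expected by random_forest() above.
if __name__ == "__main__":
    train = read_dataset(path_train)
    test = read_dataset(path_test)
    random_forest(train, test)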
#mcandrew
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.append("../")
from mods.datahelp import grabData, grabJHUData, grabDHSdata
class reportBuilder(object):
def __init__(self,gd):
self.predictions = gd.predictions()
self.qData = gd.questions()
self.quantiles = gd.quantiles()
self.metaData = gd.metaData()
def buildConsensusAndMedianPredictionText(self):
fromSurveyNumQidTXT2data = {}
for (surveyNum,qid),subset in self.quantiles.groupby(["surveynum","qid"]):
subset = subset.set_index(["quantile"])
median = subset.loc["0.5","value"]
_10thPct = subset.loc["0.1","value"]
_90thPct = subset.loc["0.9","value"]
if _10thPct > 10:
form="comma"
elif _10thPct >1:
form="1"
else:
form="2"
fromSurveyNumQidTXT2data[surveyNum,qid,"median",form] = median
fromSurveyNumQidTXT2data[surveyNum,qid,"_10",form] = _10thPct
fromSurveyNumQidTXT2data[surveyNum,qid,"_90",form] = _90thPct
self.reportDict = fromSurveyNumQidTXT2data
def addJHUdata(self,jhudata):
jhudata = jhudata.set_index(pd.to_datetime(jhudata.index))
mostRecentJhudata = jhudata.sort_index().iloc[-1,:]
dataDate = pd.to_datetime(mostRecentJhudata.name)
self.reportDict[-1,-1,"jhuday","d"] = dataDate.day
self.reportDict[-1,-1,"jhumonth","s"] = self.fromint2month(dataDate.month)
self.reportDict[-1,-1,"jhuyear","d"] = dataDate.year
self.reportDict[-1,-1,"jhucases","comma"] = mostRecentJhudata.cases
self.reportDict[-1,-1,"jhudeaths","comma"] = mostRecentJhudata.deaths
def addDHSdata(self,dhsData):
dhsData = dhsData.set_index("date")
mostRecentdata = dhsData.sort_index().iloc[-1,:]
        dataDate = pd.to_datetime(mostRecentdata.name)
import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
df_train = pd.read_csv("DATASET/train.csv")
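# The original script is truncated above; the lines below are a hypothetical
# continuation based only on the imports (LabelEncoder, StandardScaler,
# train_test_split, SVC). The 'label' column name is an assumption, not taken
# from the dataset.
label_col = "label"  # assumed target column name
y = LabelEncoder().fit_transform(df_train[label_col])
X = StandardScaler().fit_transform(df_train.drop(columns=[label_col]))
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
clf = SVC().fit(X_tr, y_tr)
print("validation accuracy:", clf.score(X_val, y_val))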
__author__ = '<NAME>'
from pandas import DataFrame, read_csv, concat
from os import path
import numpy as np
from datetime import timedelta
from enum import Enum
class OrderEvent(Enum):
SUBMISSION = 1
CANCELLATION = 2
DELETION = 3
EXECUTION = 4
HIDDEN_EXECUTION =5
CROSS_TRADE = 6
TRADING_HALT = 7
OTHER = 8
__EventMap = {}
for e in OrderEvent:
__EventMap[e.value] = e
def get_orderEvent(eventid):
return __EventMap[eventid]
class LobsterData:
def __init__(self):
self.order_books = None
        self.messages = DataFrame()
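# Added usage note: get_orderEvent() maps the integer event codes found in
# LOBSTER message files back to the OrderEvent enum defined above, e.g.
#   get_orderEvent(1) -> OrderEvent.SUBMISSION
#   get_orderEvent(4) -> OrderEvent.EXECUTION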
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.compat import (
pa_version_under2p0,
pa_version_under4p0,
)
from pandas.errors import PerformanceWarning
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
isna,
)
import pandas._testing as tm
@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])])
def test_startswith_endswith_non_str_patterns(pattern):
# GH3485
ser = Series(["foo", "bar"])
msg = f"expected a string object, not {type(pattern).__name__}"
with pytest.raises(TypeError, match=msg):
ser.str.startswith(pattern)
with pytest.raises(TypeError, match=msg):
ser.str.endswith(pattern)
def assert_series_or_index_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
else: # Index
tm.assert_index_equal(left, right)
def test_iter():
# GH3638
strs = "google", "wikimedia", "wikipedia", "wikitravel"
ser = Series(strs)
with tm.assert_produces_warning(FutureWarning):
for s in ser.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ser.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, str) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == "l"
def test_iter_empty(any_string_dtype):
ser = Series([], dtype=any_string_dtype)
i, s = 100, 1
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(any_string_dtype):
ser = Series(["a"], dtype=any_string_dtype)
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert not i
tm.assert_series_equal(ser, s)
def test_iter_object_try_string():
ser = Series(
[
slice(None, np.random.randint(10), np.random.randint(10, 20))
for _ in range(4)
]
)
i, s = 100, "h"
with tm.assert_produces_warning(FutureWarning):
for i, s in enumerate(ser.str):
pass
assert i == 100
assert s == "h"
# test integer/float dtypes (inferred by constructor) and mixed
def test_count(any_string_dtype):
ser = Series(["foo", "foofoo", np.nan, "foooofooofommmfoo"], dtype=any_string_dtype)
result = ser.str.count("f[o]+")
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
expected = Series([1, 2, np.nan, 4], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_count_mixed_object():
ser = Series(
["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],
dtype=object,
)
result = ser.str.count("a")
expected = Series([1, np.nan, 0, np.nan, np.nan, 0, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_repeat(any_string_dtype):
ser = Series(["a", "b", np.nan, "c", np.nan, "d"], dtype=any_string_dtype)
result = ser.str.repeat(3)
expected = Series(
["aaa", "bbb", np.nan, "ccc", np.nan, "ddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
result = ser.str.repeat([1, 2, 3, 4, 5, 6])
expected = Series(
["a", "bb", np.nan, "cccc", np.nan, "dddddd"], dtype=any_string_dtype
)
tm.assert_series_equal(result, expected)
def test_repeat_mixed_object():
ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])
result = ser.str.repeat(3)
expected = Series(
["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg, repeat", [[None, 4], ["b", None]])
def test_repeat_with_null(any_string_dtype, arg, repeat):
# GH: 31632
ser = Series(["a", arg], dtype=any_string_dtype)
result = ser.str.repeat([3, repeat])
expected = Series(["aaa", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_empty_str_methods(any_string_dtype):
empty_str = empty = Series(dtype=any_string_dtype)
if any_string_dtype == "object":
empty_int = Series(dtype="int64")
empty_bool = Series(dtype=bool)
else:
empty_int = Series(dtype="Int64")
empty_bool = Series(dtype="boolean")
empty_object = Series(dtype=object)
empty_bytes = Series(dtype=object)
empty_df = DataFrame()
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert "" == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.contains("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.startswith("a"))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.endswith("a"))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.replace("a", "b"))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_bool, empty.str.match("^a"))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=any_string_dtype),
empty.str.extract("()", expand=True),
)
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=True),
)
tm.assert_series_equal(empty_str, empty.str.extract("()", expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=any_string_dtype),
empty.str.extract("()()", expand=False),
)
tm.assert_frame_equal(empty_df, empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(""))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_object, empty_str.str.findall("a"))
tm.assert_series_equal(empty_int, empty.str.find("a"))
tm.assert_series_equal(empty_int, empty.str.rfind("a"))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_object, empty.str.split("a"))
tm.assert_series_equal(empty_object, empty.str.rsplit("a"))
tm.assert_series_equal(empty_object, empty.str.partition("a", expand=False))
tm.assert_frame_equal(empty_df, empty.str.partition("a"))
tm.assert_series_equal(empty_object, empty.str.rpartition("a", expand=False))
tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.strip())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.lstrip())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_object, empty_bytes.str.decode("ascii"))
tm.assert_series_equal(empty_bytes, empty.str.encode("ascii"))
# ismethods should always return boolean (GH 29624)
tm.assert_series_equal(empty_bool, empty.str.isalnum())
tm.assert_series_equal(empty_bool, empty.str.isalpha())
tm.assert_series_equal(empty_bool, empty.str.isdigit())
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under2p0,
):
tm.assert_series_equal(empty_bool, empty.str.isspace())
tm.assert_series_equal(empty_bool, empty.str.islower())
tm.assert_series_equal(empty_bool, empty.str.isupper())
tm.assert_series_equal(empty_bool, empty.str.istitle())
tm.assert_series_equal(empty_bool, empty.str.isnumeric())
tm.assert_series_equal(empty_bool, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize("NFC"))
table = str.maketrans("a", "b")
tm.assert_series_equal(empty_str, empty.str.translate(table))
@pytest.mark.parametrize(
"method, expected",
[
("isalnum", [True, True, True, True, True, False, True, True, False, False]),
("isalpha", [True, True, True, False, False, False, True, False, False, False]),
(
"isdigit",
[False, False, False, True, False, False, False, True, False, False],
),
(
"isnumeric",
[False, False, False, True, False, False, False, True, False, False],
),
(
"isspace",
[False, False, False, False, False, False, False, False, False, True],
),
(
"islower",
[False, True, False, False, False, False, False, False, False, False],
),
(
"isupper",
[True, False, False, False, True, False, True, False, False, False],
),
(
"istitle",
[True, False, True, False, True, False, False, False, False, False],
),
],
)
def test_ismethods(method, expected, any_string_dtype):
ser = Series(
["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "], dtype=any_string_dtype
)
expected_dtype = "bool" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]"
and pa_version_under2p0
and method == "isspace",
):
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)() for item in ser]
assert list(result) == expected
@pytest.mark.parametrize(
"method, expected",
[
("isnumeric", [False, True, True, False, True, True, False]),
("isdecimal", [False, True, False, False, False, True, False]),
],
)
def test_isnumeric_unicode(method, expected, any_string_dtype):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
    # 0xFF13: 3 FULLWIDTH DIGIT THREE
ser = Series(["A", "3", "¼", "★", "፸", "3", "four"], dtype=any_string_dtype)
expected_dtype = "bool" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)() for item in ser]
assert list(result) == expected
@pytest.mark.parametrize(
"method, expected",
[
("isnumeric", [False, np.nan, True, False, np.nan, True, False]),
("isdecimal", [False, np.nan, False, False, np.nan, True, False]),
],
)
def test_isnumeric_unicode_missing(method, expected, any_string_dtype):
values = ["A", np.nan, "¼", "★", np.nan, "3", "four"]
ser = Series(values, dtype=any_string_dtype)
expected_dtype = "object" if any_string_dtype == "object" else "boolean"
expected = Series(expected, dtype=expected_dtype)
result = getattr(ser.str, method)()
tm.assert_series_equal(result, expected)
def test_spilt_join_roundtrip(any_string_dtype):
ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)
result = ser.str.split("_").str.join("_")
expected = ser.astype(object)
tm.assert_series_equal(result, expected)
def test_spilt_join_roundtrip_mixed_object():
ser = Series(
["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]
)
result = ser.str.split("_").str.join("_")
expected = Series(
["a_b", np.nan, "asdf_cas_asdf", np.nan, np.nan, "foo", np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
def test_len(any_string_dtype):
ser = Series(
["foo", "fooo", "fooooo", np.nan, "fooooooo", "foo\n", "あ"],
dtype=any_string_dtype,
)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = ser.str.len()
expected_dtype = "float64" if any_string_dtype == "object" else "Int64"
expected = Series([3, 4, 6, np.nan, 8, 4, 1], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_len_mixed():
ser = Series(
["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]
)
result = ser.str.len()
expected = Series([3, np.nan, 13, np.nan, np.nan, 3, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method,sub,start,end,expected",
[
("index", "EF", None, None, [4, 3, 1, 0]),
("rindex", "EF", None, None, [4, 5, 7, 4]),
("index", "EF", 3, None, [4, 3, 7, 4]),
("rindex", "EF", 3, None, [4, 5, 7, 4]),
("index", "E", 4, 8, [4, 5, 7, 4]),
("rindex", "E", 0, 5, [4, 3, 1, 4]),
],
)
def test_index(method, sub, start, end, index_or_series, any_string_dtype, expected):
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
)
expected_dtype = np.int64 if any_string_dtype == "object" else "Int64"
expected = index_or_series(expected, dtype=expected_dtype)
result = getattr(obj.str, method)(sub, start, end)
if index_or_series is Series:
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
# compare with standard library
expected = [getattr(item, method)(sub, start, end) for item in obj]
assert list(result) == expected
def test_index_not_found_raises(index_or_series, any_string_dtype):
obj = index_or_series(
["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype
)
with pytest.raises(ValueError, match="substring not found"):
obj.str.index("DE")
@pytest.mark.parametrize("method", ["index", "rindex"])
def test_index_wrong_type_raises(index_or_series, any_string_dtype, method):
obj = index_or_series([], dtype=any_string_dtype)
msg = "expected a string object, not int"
with pytest.raises(TypeError, match=msg):
getattr(obj.str, method)(0)
@pytest.mark.parametrize(
"method, exp",
[
["index", [1, 1, 0]],
["rindex", [3, 1, 2]],
],
)
def test_index_missing(any_string_dtype, method, exp):
ser = Series(["abcb", "ab", "bcbe", np.nan], dtype=any_string_dtype)
expected_dtype = np.float64 if any_string_dtype == "object" else "Int64"
result = getattr(ser.str, method)("b")
expected = Series(exp + [np.nan], dtype=expected_dtype)
tm.assert_series_equal(result, expected)
def test_pipe_failures(any_string_dtype):
# #2119
ser = Series(["A|B|C"], dtype=any_string_dtype)
result = ser.str.split("|")
expected = Series([["A", "B", "C"]], dtype=object)
tm.assert_series_equal(result, expected)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = ser.str.replace("|", " ", regex=False)
expected = Series(["A B C"], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start, stop, step, expected",
[
(2, 5, None, ["foo", "bar", np.nan, "baz"]),
(0, 3, -1, ["", "", np.nan, ""]),
(None, None, -1, ["owtoofaa", "owtrabaa", np.nan, "xuqzabaa"]),
(3, 10, 2, ["oto", "ato", np.nan, "aqx"]),
(3, 0, -1, ["ofa", "aba", np.nan, "aba"]),
],
)
def test_slice(start, stop, step, expected, any_string_dtype):
ser = Series(["aafootwo", "aabartwo", np.nan, "aabazqux"], dtype=any_string_dtype)
result = ser.str.slice(start, stop, step)
expected = Series(expected, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start, stop, step, expected",
[
(2, 5, None, ["foo", np.nan, "bar", np.nan, np.nan, np.nan, np.nan, np.nan]),
(4, 1, -1, ["oof", np.nan, "rab", np.nan, np.nan, np.nan, np.nan, np.nan]),
],
)
def test_slice_mixed_object(start, stop, step, expected):
ser = Series(["aafootwo", np.nan, "aabartwo", True, datetime.today(), None, 1, 2.0])
result = ser.str.slice(start, stop, step)
expected = Series(expected)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"start,stop,repl,expected",
[
(2, 3, None, ["shrt", "a it longer", "evnlongerthanthat", "", np.nan]),
(2, 3, "z", ["shzrt", "a zit longer", "evznlongerthanthat", "z", np.nan]),
(2, 2, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]),
(2, 1, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]),
(-1, None, "z", ["shorz", "a bit longez", "evenlongerthanthaz", "z", np.nan]),
(None, -2, "z", ["zrt", "zer", "zat", "z", np.nan]),
(6, 8, "z", ["shortz", "a bit znger", "evenlozerthanthat", "z", np.nan]),
(-10, 3, "z", ["zrt", "a zit longer", "evenlongzerthanthat", "z", np.nan]),
],
)
def test_slice_replace(start, stop, repl, expected, any_string_dtype):
ser = Series(
["short", "a bit longer", "evenlongerthanthat", "", np.nan],
dtype=any_string_dtype,
)
expected = Series(expected, dtype=any_string_dtype)
result = ser.str.slice_replace(start, stop, repl)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["aa", "bb", np.nan, "cc"]],
["lstrip", ["aa ", "bb \n", np.nan, "cc "]],
["rstrip", [" aa", " bb", np.nan, "cc"]],
],
)
def test_strip_lstrip_rstrip(any_string_dtype, method, exp):
ser = Series([" aa ", " bb \n", np.nan, "cc "], dtype=any_string_dtype)
result = getattr(ser.str, method)()
expected = Series(exp, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["aa", np.nan, "bb"]],
["lstrip", ["aa ", np.nan, "bb \t\n"]],
["rstrip", [" aa", np.nan, " bb"]],
],
)
def test_strip_lstrip_rstrip_mixed_object(method, exp):
ser = Series([" aa ", np.nan, " bb \t\n", True, datetime.today(), None, 1, 2.0])
result = getattr(ser.str, method)()
expected = Series(exp + [np.nan, np.nan, np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"method, exp",
[
["strip", ["ABC", " BNSD", "LDFJH "]],
["lstrip", ["ABCxx", " BNSD", "LDFJH xx"]],
["rstrip", ["xxABC", "xx BNSD", "LDFJH "]],
],
)
def test_strip_lstrip_rstrip_args(any_string_dtype, method, exp):
ser = Series(["xxABCxx", "xx BNSD", "LDFJH xx"], dtype=any_string_dtype)
with tm.maybe_produces_warning(
PerformanceWarning,
any_string_dtype == "string[pyarrow]" and pa_version_under4p0,
):
result = getattr(ser.str, method)("x")
expected = Series(exp, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"prefix, expected", [("a", ["b", " b c", "bc"]), ("ab", ["", "a b c", "bc"])]
)
def test_removeprefix(any_string_dtype, prefix, expected):
ser = Series(["ab", "a b c", "bc"], dtype=any_string_dtype)
result = ser.str.removeprefix(prefix)
    ser_expected = Series(expected, dtype=any_string_dtype)
    tm.assert_series_equal(result, ser_expected)
import pandas as pd
from abb_deeplearning.abb_data_pipeline import abb_clouddrl_read_pipeline as abb_rp
from abb_deeplearning.abb_data_pipeline import abb_clouddrl_constants as abb_c
import datetime as dt
import os
df_master = pd.read_hdf('/media/data/Daten/data_C_int/master_index_cav.h5')
file_filter={"_sp_256", ".jpeg"}
df_data_files=[]
df_label_files=[]
#(dt.datetime.strptime('2015-09-28', '%Y-%m-%d'),dt.datetime.strptime('2015-09-30', '%Y-%m-%d'))
for day in abb_rp.read_cld_img_time_range_paths(img_d_tup_l=None,automatic_daytime=True,
file_filter=file_filter, get_sp_data=True,get_cs_data=True,get_mpc_data=True, randomize_days=False):
#Img
image_keys = list(day[0].keys())
img_data = list(day[0].values())
folders = [p.split('/')[5] for p in img_data]
names = [p.split('/')[6] for p in img_data]
img_df = pd.DataFrame(data={'folder':folders,'name':names},index=image_keys)
#IRR
irr = pd.read_csv(day[1] , index_col=0, parse_dates=True,
header=None)
irr_data = irr.loc[pd.to_datetime(image_keys)]
irr_data.columns = [['irradiation_hs']]
#MPC100
mpc= pd.read_csv(day[2].rsplit('.',1)[0]+"100.csv", index_col=0, parse_dates=True,
header=None)
    mpc_data = mpc.loc[pd.to_datetime(image_keys)]
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
import netCDF4 as nc
import datetime as dt
from salishsea_tools import evaltools as et, places, viz_tools, visualisations, geo_tools
import xarray as xr
import pandas as pd
import pickle
import os
import gsw
# Extracting winds from the correct path
def getWindVarsYear(year,loc):
''' Given a year, returns the correct directory and nam_fmt for wind forcing as well as the
location of S3 on the corresponding grid.
Parameters:
year: a year value in integer form
loc: the location name as a string. Eg. loc='S3'
Returns:
jW: y-coordinate for the location
iW: x-coordinate for the location
opsdir: path to directory where wind forcing file is stored
nam_fmt: naming convention of the appropriate files
'''
if year>2014:
opsdir='/results/forcing/atmospheric/GEM2.5/operational/'
nam_fmt='ops'
jW,iW=places.PLACES[loc]['GEM2.5 grid ji']
else:
opsdir='/data/eolson/results/MEOPAR/GEMLAM/'
nam_fmt='gemlam'
with xr.open_dataset('/results/forcing/atmospheric/GEM2.5/gemlam/gemlam_y2012m03d01.nc') as gridrefWind:
# always use a post-2011 file here to identify station grid location
lon,lat=places.PLACES[loc]['lon lat']
jW,iW=geo_tools.find_closest_model_point(lon,lat,
gridrefWind.variables['nav_lon'][:,:]-360,gridrefWind.variables['nav_lat'][:,:],
grid='GEM2.5')
# the -360 is needed because longitudes in this case are reported in postive degrees East
return jW,iW,opsdir,nam_fmt
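# Added example (hypothetical values; 'S3' must be a key in places.PLACES):
#   jW, iW, opsdir, nam_fmt = getWindVarsYear(2016, 'S3')
#   # 2016 > 2014, so opsdir points at the GEM2.5 operational wind forcing and
#   # (jW, iW) come from the 'GEM2.5 grid ji' entry for S3.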
# Metric 1:
def metric1_bloomtime(phyto_alld,no3_alld,bio_time):
''' Given datetime array and two 2D arrays of phytoplankton and nitrate concentrations, over time
and depth, returns a datetime value of the spring phytoplankton bloom date according to the
following definition (now called 'metric 1'):
'The spring bloom date is the peak phytoplankton concentration (averaged from the surface to
3 m depth) within four days of the average upper 3 m nitrate concentration going below 0.5 uM
(the half-saturation concentration) for two consecutive days'
EDIT: 0.5 uM was changed to 2.0 uM to yield more accurate results
Parameters:
phyto_alld: 2D array of phytoplankton concentrations (in uM N) over all depths and time
range of 'bio_time'
no3_alld: 2D array of nitrate concentrations (in uM N) over all depths and time
range of 'bio_time'
bio_time: 1D datetime array of the same time frame as phyto_alld and no3_alld
Returns:
bloomtime1: the spring bloom date as a single datetime value
'''
# a) get avg phytplankton in upper 3m
phyto_alld_df=pd.DataFrame(phyto_alld)
upper_3m_phyto=pd.DataFrame(phyto_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_phyto.columns=['upper_3m_phyto']
#upper_3m_phyto
# b) get average no3 in upper 3m
no3_alld_df=pd.DataFrame(no3_alld)
upper_3m_no3=pd.DataFrame(no3_alld_df[[0,1,2,3]].mean(axis=1))
upper_3m_no3.columns=['upper_3m_no3']
#upper_3m_no3
# make bio_time into a dataframe
bio_time_df=pd.DataFrame(bio_time)
bio_time_df.columns=['bio_time']
    metric1_df = pd.concat((bio_time_df, upper_3m_phyto, upper_3m_no3), axis=1)
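    # The original function is truncated here. A sketch of the remaining steps,
    # inferred from the docstring definition (assumed, not the author's code):
    #   low_no3 = metric1_df['upper_3m_no3'] < 2.0            # drawdown flag
    #   two_days = low_no3 & low_no3.shift(-1, fill_value=False)
    #   drawdown = metric1_df.index[two_days][0]              # first 2-day drawdown
    #   window = metric1_df.iloc[max(drawdown - 4, 0):drawdown + 5]
    #   bloomtime1 = window.loc[window['upper_3m_phyto'].idxmax(), 'bio_time']
    #   return bloomtime1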
#!/usr/bin/env python3
"""
https://www.ebi.ac.uk/gwas/rest/docs/api
"""
###
import sys,os,re,json,time,logging,tqdm
import urllib.parse
import pandas as pd
#
from ..util import rest
#
API_HOST='www.ebi.ac.uk'
API_BASE_PATH='/gwas/rest/api'
BASE_URL='https://'+API_HOST+API_BASE_PATH
#
NCHUNK=100;
#
##############################################################################
def ListStudies(base_url=BASE_URL, fout=None):
"""Only simple metadata."""
tags=[]; n_study=0; rval=None; df=None; tq=None;
url_this = base_url+f'/studies?size={NCHUNK}'
while True:
if rval:
if 'next' not in rval['_links']: break
elif url_this == rval['_links']['last']['href']: break
else: url_this = rval['_links']['next']['href']
logging.debug(url_this)
rval = rest.Utils.GetURL(url_this, parse_json=True)
if not rval or '_embedded' not in rval or 'studies' not in rval['_embedded']: break
studies = rval['_embedded']['studies']
if not studies: break
if tq is None: tq = tqdm.tqdm(total=rval["page"]["totalElements"], unit="studies")
for study in studies:
tq.update()
if not tags:
for tag in study.keys():
if type(study[tag]) not in (list, dict) or tag=="diseaseTrait":
tags.append(tag) #Only simple metadata.
df_this = pd.DataFrame({tags[j]:([str(study[tags[j]])] if tags[j] in study else ['']) for j in range(len(tags))})
if fout: df_this.to_csv(fout, "\t", index=False, header=(n_study==0), mode=('w' if n_study==0 else 'a'))
if fout is None: df = pd.concat([df, df_this])
n_study+=1
logging.info(f"n_study: {n_study}")
if fout is None: return(df)
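# Added usage sketch (requires network access to the GWAS Catalog REST API):
#   df = ListStudies()                          # page through /studies into a DataFrame
#   ListStudies(fout=open("studies.tsv", "w"))  # or stream TSV rows to a file handle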
##############################################################################
def GetStudyAssociations(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
"""
Mapped genes via SNP links.
arg = authorReportedGene
sra = strongestRiskAllele
https://www.ebi.ac.uk/gwas/rest/api/studies/GCST001430/associations?projection=associationByStudy
"""
n_id=0; n_assn=0; n_loci=0; n_arg=0; n_sra=0; n_snp=0; df=None; tq=None;
quiet = bool(logging.getLogger().getEffectiveLevel()>15)
gcsts=set([]); tags_assn=[]; tags_study=[]; tags_locus=[]; tags_sra=[]; tags_arg=[];
url = base_url+'/studies'
if skip>0: logging.info(f"SKIP IDs skipped: {skip}")
for id_this in ids[skip:]:
if not quiet and tq is None: tq = tqdm.tqdm(total=len(ids)-skip, unit="studies")
if tq is not None: tq.update()
url_this = url+f'/{id_this}/associations?projection=associationByStudy'
rval = rest.Utils.GetURL(url_this, parse_json=True)
if not rval: continue
if '_embedded' in rval and 'associations' in rval['_embedded']:
assns = rval['_embedded']['associations']
else:
logging.error(f'No associations for study: {id_this}')
continue
df_this=None;
for assn in assns:
n_assn+=1
if n_assn==1:
for key,val in assn.items():
if type(val) not in (list, dict):
tags_assn.append(key)
for key in assn['study'].keys():
if type(assn['study'][key]) not in (list, dict):
tags_study.append(key)
for key in assn['loci'][0].keys():
if key not in ('strongestRiskAlleles', 'authorReportedGenes'):
tags_locus.append(key)
for key in assn['loci'][0]['strongestRiskAlleles'][0].keys():
if key != '_links':
tags_sra.append(key)
df_assn = pd.DataFrame({tags_assn[j]:[assn[tags_assn[j]]] for j in range(len(tags_assn))})
df_study = pd.DataFrame({tags_study[j]:[assn['study'][tags_study[j]]] for j in range(len(tags_study))})
df_locus = pd.DataFrame({tags_locus[j]:[assn['loci'][0][tags_locus[j]]] for j in range(len(tags_locus))})
df_locus.columns = ['locus_'+s for s in df_locus.columns]
df_sra = pd.DataFrame({tags_sra[j]:[assn['loci'][0]['strongestRiskAlleles'][0][tags_sra[j]]] for j in range(len(tags_sra))})
df_sra.columns = ['allele_'+s for s in df_sra.columns]
df_assn = pd.concat([df_assn, df_study, df_locus, df_sra], axis=1)
df_this = pd.concat([df_this, df_assn], axis=0)
if 'accessionId' in assn['study']: gcsts.add(assn['study']['accessionId'])
n_loci += len(assn['loci'])
for locus in assn['loci']:
n_sra += len(locus['strongestRiskAlleles'])
for sra in locus['strongestRiskAlleles']:
snp_href = sra['_links']['snp']['href'] if '_links' in sra and 'snp' in sra['_links'] and 'href' in sra['_links']['snp'] else ''
if snp_href: n_snp+=1
if fout: df_this.to_csv(fout, "\t", index=False, header=(n_id==0), mode=('w' if n_id==0 else 'a'))
if fout is None: df = pd.concat([df, df_this], axis=0)
n_id+=1
if n_id==nmax:
logging.info(f"NMAX IDs reached: {nmax}")
break
if tq is not None: tq.close()
n_gcst = len(gcsts)
logging.info(f"INPUT RCSTs: {n_id}; OUTPUT RCSTs: {n_gcst} ; assns: {n_assn} ; loci: {n_loci} ; alleles: {n_sra} ; snps: {n_snp}")
if fout is None: return(df)
##############################################################################
def GetSnps(ids, skip=0, nmax=None, base_url=BASE_URL, fout=None):
"""
Input: rs_id, e.g. rs7329174
loc = location
gc = genomicContext
"""
n_snp=0; n_gene=0; n_loc=0; df=None; tq=None;
tags_snp=[]; tags_loc=[]; tags_gc=[]; tags_gcloc=[]; tags_gene=[];
quiet = bool(logging.getLogger().getEffectiveLevel()>15)
url = base_url+'/singleNucleotidePolymorphisms'
if skip>0: logging.info(f"SKIP IDs skipped: {skip}")
for id_this in ids[skip:]:
if not quiet and tq is None: tq = tqdm.tqdm(total=len(ids)-skip, unit="snps")
if tq is not None: tq.update()
url_this = url+'/'+id_this
snp = rest.Utils.GetURL(url_this, parse_json=True)
if not snp: continue
if 'genomicContexts' not in snp: continue
if len(snp['genomicContexts'])==0: continue
df_this=None;
for gc in snp['genomicContexts']:
if not tags_snp:
for key,val in snp.items():
if type(val) not in (list, dict): tags_snp.append(key)
for key in gc.keys():
if key not in ('gene', 'location', '_links'): tags_gc.append(key)
for key in gc['location'].keys():
if key != '_links': tags_gcloc.append(key)
for key in gc['gene'].keys():
if key != '_links': tags_gene.append(key)
df_snp = pd.DataFrame({tags_snp[j]:[snp[tags_snp[j]]] for j in range(len(tags_snp))})
df_gc = pd.DataFrame({tags_gc[j]:[gc[tags_gc[j]]] for j in range(len(tags_gc))})
gcloc = gc['location']
df_gcloc = pd.DataFrame({tags_gcloc[j]:[gcloc[tags_gcloc[j]]] for j in range(len(tags_gcloc))})
gene = gc['gene']
try: gene["ensemblGeneIds"] = (",".join([gid["ensemblGeneId"] for gid in gene["ensemblGeneIds"]]))
except: pass
try: gene["entrezGeneIds"] = (",".join([gid["entrezGeneId"] for gid in gene["entrezGeneIds"]]))
except: pass
df_gene = pd.DataFrame({tags_gene[j]:[gene[tags_gene[j]]] for j in range(len(tags_gene))})
      df_snp = pd.concat([df_snp, df_gc, df_gcloc, df_gene], axis=1)
"""Probabilistic autoregressive model."""
import logging
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from deepecho.models.base import DeepEcho
LOGGER = logging.getLogger(__name__)
class PARNet(torch.nn.Module):
"""PARModel ANN model."""
def __init__(self, data_size, context_size, hidden_size=32):
super(PARNet, self).__init__()
self.context_size = context_size
self.down = torch.nn.Linear(data_size + context_size, hidden_size)
self.rnn = torch.nn.GRU(hidden_size, hidden_size)
self.up = torch.nn.Linear(hidden_size, data_size)
def forward(self, x, c):
"""Forward passing computation."""
if isinstance(x, torch.nn.utils.rnn.PackedSequence):
x, lengths = torch.nn.utils.rnn.pad_packed_sequence(x)
if self.context_size:
x = torch.cat([
x,
c.unsqueeze(0).expand(x.shape[0], c.shape[0], c.shape[1])
], dim=2)
x = self.down(x)
x = torch.nn.utils.rnn.pack_padded_sequence(x, lengths, enforce_sorted=False)
x, _ = self.rnn(x)
x, lengths = torch.nn.utils.rnn.pad_packed_sequence(x)
x = self.up(x)
x = torch.nn.utils.rnn.pack_padded_sequence(x, lengths, enforce_sorted=False)
else:
if self.context_size:
x = torch.cat([
x,
c.unsqueeze(0).expand(x.shape[0], c.shape[0], c.shape[1])
], dim=2)
x = self.down(x)
x, _ = self.rnn(x)
x = self.up(x)
return x
class PARModel(DeepEcho):
"""Probabilistic autoregressive model.
1. Analyze data types.
- Categorical/Ordinal: Create mapping.
- Continuous/Timestamp: Normalize, etc.
- Count: Compute min and range
2. Data -> Tensor
- Categorical/Ordinal: Create one-hot
- Continuous/Timestamp: Copy into `mu` after normalize, set `var=0.0`
- Count: Subtract min, divide by range, copy into `r`, set `p=0.0`
3. Loss
- Categorical/Ordinal: Cross entropy
- Continuous/Timestamp: Gaussian likelihood
- Count: Negative binomial (multiply param + value by range), evaluate loss.
4. Sample (Tensor -> Tensor)
- Categorical/Ordinal: Categorical distribution, store as one hot
- Continuous/Timestamp: Gaussian sample, store into `mu`
- Count: Negative binomial sample (multiply `r` by range?), divide by range
5. Tensor -> Data
- Categorical/Ordinal: Find the maximum value
- Continuous/Timestamp: Rescale the value in `mu`
- Count: Multiply by range, add min.
Args:
epochs (int):
The number of epochs to train for. Defaults to 128.
sample_size (int):
The number of times to sample (before choosing and
returning the sample which maximizes the likelihood).
Defaults to 1.
cuda (bool):
Whether to attempt to use cuda for GPU computation.
If this is False or CUDA is not available, CPU will be used.
Defaults to ``True``.
verbose (bool):
Whether to print progress to console or not.
"""
def __init__(self, epochs=128, sample_size=1, cuda=True, verbose=True):
self.epochs = epochs
self.sample_size = sample_size
if not cuda or not torch.cuda.is_available():
device = 'cpu'
elif isinstance(cuda, str):
device = cuda
else:
device = 'cuda'
self.device = torch.device(device)
self.verbose = verbose
LOGGER.info('%s instance created', self)
if verbose:
print(self, 'instance created')
def __repr__(self):
return "{}(epochs={}, sample_size={}, cuda='{}', verbose={})".format(
self.__class__.__name__,
self.epochs,
self.sample_size,
self.device,
self.verbose,
)
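    # Added example of the constructor and __repr__ defined above:
    #   >>> PARModel(epochs=16, sample_size=1, cuda=False, verbose=False)
    #   PARModel(epochs=16, sample_size=1, cuda='cpu', verbose=False)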
def _idx_map(self, x, t):
idx = 0
idx_map = {}
for i, t in enumerate(t):
if t == 'continuous' or t == 'datetime':
idx_map[i] = {
'type': t,
'mu': np.nanmean(x[i]),
'std': np.nanstd(x[i]),
'nulls': pd.isnull(x[i]).any(),
'indices': (idx, idx + 1, idx + 2)
}
idx += 3
elif t == 'count':
idx_map[i] = {
'type': t,
'min': np.nanmin(x[i]),
'range': np.nanmax(x[i]) - np.nanmin(x[i]),
'nulls': pd.isnull(x[i]).any(),
'indices': (idx, idx + 1, idx + 2)
}
idx += 3
elif t == 'categorical' or t == 'ordinal':
idx_map[i] = {
'type': t,
'indices': {}
}
idx += 1
for v in set(x[i]):
if pd.isnull(v):
v = None
idx_map[i]['indices'][v] = idx
idx += 1
else:
raise ValueError('Unsupported type: {}'.format(t))
return idx_map, idx
def _build(self, sequences, context_types, data_types):
contexts = [[] for _ in range(len(context_types))]
data = [[] for _ in range(len(data_types))]
min_length = np.inf
max_length = -np.inf
for sequence in sequences:
sequence_data = sequence['data']
sequence_context = sequence['context']
sequence_length = len(sequence_data[0])
min_length = min(min_length, sequence_length)
max_length = max(max_length, sequence_length)
for i in range(len(context_types)):
contexts[i].append(sequence_context[i])
for i in range(len(data_types)):
data[i].extend(sequence_data[i])
self._fixed_length = min_length == max_length
self._min_length = min_length
self._max_length = max_length
self._ctx_map, self._ctx_dims = self._idx_map(contexts, context_types)
self._data_map, self._data_dims = self._idx_map(data, data_types)
self._data_map['<TOKEN>'] = {
'type': 'categorical',
'indices': {
'<START>': self._data_dims,
'<END>': self._data_dims + 1,
'<BODY>': self._data_dims + 2
}
}
self._data_dims += 3
def _data_to_tensor(self, data):
seq_len = len(data[0])
X = []
x = torch.zeros(self._data_dims)
x[self._data_map['<TOKEN>']['indices']['<START>']] = 1.0
X.append(x)
for i in range(seq_len):
x = torch.zeros(self._data_dims)
for key, props in self._data_map.items():
if key == '<TOKEN>':
x[self._data_map['<TOKEN>']['indices']['<BODY>']] = 1.0
elif props['type'] in ['continuous', 'timestamp']:
mu_idx, sigma_idx, missing_idx = props['indices']
if pd.isnull(data[key][i]) or props['std'] == 0:
x[mu_idx] = 0.0
else:
x[mu_idx] = (data[key][i] - props['mu']) / props['std']
x[sigma_idx] = 0.0
x[missing_idx] = 1.0 if pd.isnull(data[key][i]) else 0.0
elif props['type'] in ['count']:
r_idx, p_idx, missing_idx = props['indices']
if pd.isnull(data[key][i]) or props['range'] == 0:
x[r_idx] = 0.0
else:
x[r_idx] = (data[key][i] - props['min']) / props['range']
x[p_idx] = 0.0
x[missing_idx] = 1.0 if pd.isnull(data[key][i]) else 0.0
elif props['type'] in ['categorical', 'ordinal']: # categorical
value = data[key][i]
if pd.isnull(value):
value = None
x[props['indices'][value]] = 1.0
else:
raise ValueError()
X.append(x)
x = torch.zeros(self._data_dims)
x[self._data_map['<TOKEN>']['indices']['<END>']] = 1.0
X.append(x)
return torch.stack(X, dim=0).to(self.device)
def _context_to_tensor(self, context):
if not self._ctx_dims:
return None
x = torch.zeros(self._ctx_dims)
for key, props in self._ctx_map.items():
if props['type'] in ['continuous', 'datetime']:
mu_idx, sigma_idx, missing_idx = props['indices']
                x[mu_idx] = 0.0 if (pd.isnull(context[key]) or props['std'] == 0) else (
                    context[key] - props['mu']) / props['std']
import locale
import numpy as np
import pytest
from pandas.compat import (
is_platform_windows,
np_version_under1p19,
)
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
from pandas.core.arrays.floating import (
Float32Dtype,
Float64Dtype,
)
def test_uses_pandas_na():
a = pd.array([1, None], dtype=Float64Dtype())
assert a[1] is pd.NA
def test_floating_array_constructor():
values = np.array([1, 2, 3, 4], dtype="float64")
mask = np.array([False, False, False, True], dtype="bool")
result = FloatingArray(values, mask)
expected = pd.array([1, 2, 3, np.nan], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
tm.assert_numpy_array_equal(result._data, values)
tm.assert_numpy_array_equal(result._mask, mask)
msg = r".* should be .* numpy array. Use the 'pd.array' function instead"
with pytest.raises(TypeError, match=msg):
FloatingArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
FloatingArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
FloatingArray(values.astype(int), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
FloatingArray(values)
def test_floating_array_disallows_float16(request):
# GH#44715
arr = np.array([1, 2], dtype=np.float16)
mask = np.array([False, False])
msg = "FloatingArray does not support np.float16 dtype"
with pytest.raises(TypeError, match=msg):
FloatingArray(arr, mask)
if np_version_under1p19 or (
locale.getlocale()[0] != "en_US" and not is_platform_windows()
):
# the locale condition may need to be refined; this fails on
# the CI in the ZH_CN build
# https://github.com/numpy/numpy/issues/20512
mark = pytest.mark.xfail(reason="numpy does not raise on np.dtype('Float16')")
request.node.add_marker(mark)
with pytest.raises(TypeError, match="data type 'Float16' not understood"):
pd.array([1.0, 2.0], dtype="Float16")
def test_floating_array_constructor_copy():
values = np.array([1, 2, 3, 4], dtype="float64")
mask = np.array([False, False, False, True], dtype="bool")
result = FloatingArray(values, mask)
assert result._data is values
assert result._mask is mask
result = FloatingArray(values, mask, copy=True)
assert result._data is not values
assert result._mask is not mask
def test_to_array():
result = pd.array([0.1, 0.2, 0.3, 0.4])
expected = pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize(
"a, b",
[
([1, None], [1, pd.NA]),
([None], [pd.NA]),
([None, np.nan], [pd.NA, pd.NA]),
([1, np.nan], [1, pd.NA]),
([np.nan], [pd.NA]),
],
)
def test_to_array_none_is_nan(a, b):
    result = pd.array(a, dtype="Float64")
    expected = pd.array(b, dtype="Float64")
    tm.assert_extension_array_equal(result, expected)
from unittest import TestCase
import pandas as pd
import numpy as np
from moonstone.normalization.counts.geometric_mean import (
GeometricMeanNormalization
)
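# The class under test implements median-of-ratios (DESeq2-style) scaling:
# per-gene log counts are centred on their across-sample mean, and each sample's
# scaling factor is the exponential of its median centred log-ratio.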
class TestGeometricMeanNormalization(TestCase):
def setUp(self):
data = [
[255, 26, 48, 75],
[366, 46, 78, 0],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
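        # counts matrix: genes (Gen_*) as rows, samples (Sample_*) as columns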
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
self.dummy_df = pd.DataFrame(data, columns=column_names, index=ind)
def test_check_format(self):
tested_object = GeometricMeanNormalization(self.dummy_df)
pd.testing.assert_frame_equal(tested_object.df, self.dummy_df)
def test_non_zero_df(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=80)
data = [
[255, 26, 48, 75],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind).astype('float')
pd.testing.assert_frame_equal(tested_object.non_zero_df(self.dummy_df), expected_result)
def test_non_zero_df_threshold70(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=70, normalization_level=0)
data = [
[255, 26, 48, 75],
[366, 46, 78, np.nan],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind).astype('float')
pd.testing.assert_frame_equal(tested_object.non_zero_df(self.dummy_df), expected_result)
def test_log_df(self):
input_data = [
[255, 26, 48, 75],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
input_column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
input_ind = ["Gen_1", "Gen_3", 'Gen_4']
input_df = pd.DataFrame(input_data, columns=input_column_names, index=input_ind)
tested_object = GeometricMeanNormalization(self.dummy_df)
data = [
[5.541264, 3.258097, 3.871201, 4.317488],
[6.861711, 5.402677, 3.828641, 4.174387],
[4.488636, 3.988984, 4.976734, 3.367296]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind)
pd.testing.assert_frame_equal(tested_object.log_df(input_df), expected_result)
def test_log_base_n_df(self):
input_data = [
[255, 26, 48, 75],
[955, 222, 46, 65],
[89, 54, 145, 29]
]
input_column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
input_ind = ["Gen_1", "Gen_3", 'Gen_4']
input_df = pd.DataFrame(input_data, columns=input_column_names, index=input_ind)
tested_object = GeometricMeanNormalization(self.dummy_df, log_number=10)
data = [
[2.406540, 1.414973, 1.681241, 1.875061],
[2.980003, 2.346353, 1.662758, 1.812913],
[1.949390, 1.732394, 2.161368, 1.462398]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind)
pd.testing.assert_frame_equal(tested_object.log_df(input_df),
expected_result)
def test_removed_zero_df_None(self):
data = [
[255, 26, 48, 75],
[366, 0, 78, 0],
[955, 0, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
dummy_df = pd.DataFrame(data, columns=column_names, index=ind)
tested_object = GeometricMeanNormalization(dummy_df)
expected_result = None
self.assertEqual(tested_object.removed_zero_df, expected_result)
def test_removed_zero_df(self):
data = [
[255, 26, 48, 75],
[366, 0, 78, 0],
[955, 0, 46, 65],
[89, 54, 145, 29]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", 'Gen_2', "Gen_3", 'Gen_4']
dummy_df = pd.DataFrame(data, columns=column_names, index=ind)
tested_object = GeometricMeanNormalization(dummy_df)
data = [
[366, 0, 78, 0],
[955, 0, 46, 65],
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ['Gen_2', "Gen_3"]
expected_result = pd.DataFrame(data, columns=column_names, index=ind)
scaling_factors = tested_object.scaling_factors # noqa
pd.testing.assert_frame_equal(tested_object.removed_zero_df,
expected_result)
def test_calculating_and_substracting_mean_row(self):
input_data = [
[5.541264, 3.258097, 3.871201, 4.317488],
[6.861711, 5.402677, 3.828641, 4.174387],
[4.488636, 3.988984, 4.976734, 3.367296]
]
input_column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
input_ind = ["Gen_1", "Gen_3", 'Gen_4']
input_df = pd.DataFrame(input_data, columns=input_column_names, index=input_ind)
tested_object = GeometricMeanNormalization(self.dummy_df)
data = [
[1.294251, -0.988916, -0.375811, 0.070475],
[1.794857, 0.335823, -1.238213, -0.892467],
[0.283224, -0.216428, 0.771321, -0.838117]
]
column_names = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
ind = ["Gen_1", "Gen_3", 'Gen_4']
expected_result = pd.DataFrame(data, columns=column_names, index=ind)
pd.testing.assert_frame_equal(tested_object.calculating_and_substracting_mean_row(input_df).round(6),
expected_result)
def test_scaling_factor_zero_thresh_100(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=100)
data = [3.648263, 0.805390, 0.686732, 0.432524]
ind = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
expected_result = pd.Series(data, index=ind)
pd.testing.assert_series_equal(tested_object.scaling_factors, expected_result)
def test_scaling_factor_zero_thresh_80(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=80)
data = [3.648263, 0.805390, 0.686732, 0.432524]
ind = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
expected_result = pd.Series(data, index=ind)
pd.testing.assert_series_equal(tested_object.scaling_factors, expected_result)
def test_scaling_factor_zero_thresh_70(self):
tested_object = GeometricMeanNormalization(self.dummy_df, zero_threshold=70)
data = [3.495248, 0.612726, 0.699505, 0.432524]
ind = ['Sample_1', 'Sample_2', 'Sample_3', 'Sample_4']
        expected_result = pd.Series(data, index=ind)
        pd.testing.assert_series_equal(tested_object.scaling_factors, expected_result)
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 14:06:09 2018
@author: ashkrelja
"""
#import data
import pandas as pd
import numpy as np
path = 'C:/Users/ashkrelja/Documents/Wall_Street_Lending/Technology/Analytics/Operations_Analytics/2019/Operations Analytics_03_2018.csv'
df = pd.read_csv(path, usecols = ['Status_ClosedDate', 'Loan_LoanWith'])
#manipulate data
df.dropna(inplace = True)
df['Status_ClosedDate'] = df['Status_ClosedDate'].apply(lambda x: pd.to_datetime(x))
df.set_index('Status_ClosedDate', inplace = True)
df2 = df.resample('MS').sum()
df2 = df2.loc['2013-01-01T00:00:00.000000000':]
df2.plot()
#X13 seasonal decomposition
from statsmodels.tsa.x13 import x13_arima_analysis
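# Note: x13_arima_analysis shells out to the external X-13ARIMA-SEATS binary,
# which must be installed and discoverable (e.g. via the X13PATH environment variable).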
output = x13_arima_analysis(df2['Loan_LoanWith'])
df2['trend'] = output.trend
df2['seasadj'] = output.seasadj
df2['irregular'] = output.irregular
df2['seasonal'] = df2['Loan_LoanWith'] - df2['seasadj']
df2['seasadj_irr'] = df2['seasadj'] - df2['irregular']
df2['seasadj_log'] = df2['seasadj_irr'].apply(lambda x: np.log(x)) #log-series
df2['seasonal'].plot(legend = 'seasonal')
df2['trend'].plot(legend = 'trend')
df2['seasadj'].plot(legend = 'seasadj')
df2['irregular'].plot(legend = 'irregular')
df2['seasadj_irr'].plot(legend = 'fully adjusted')
df2['seasadj_log'].plot()  # trend still visible; the series is differenced below (d=2) to remove it before modelling
df2.head()
#stationarity
from statsmodels.tsa.statespace.tools import diff
from statsmodels.tsa.stattools import adfuller
df2['diff_1_seasadj'] = diff(diff(df2['seasadj_log']))
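# second difference of the log, seasonally adjusted series; matches d=2 in the ARIMA orders fitted below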
df2['diff_1_seasadj'].plot()
df2['diff_1_seasadj'].replace(np.NaN,0,inplace=True)
adfuller(df2['diff_1_seasadj']) #reject Ho, conclude Ha: no unit root
#ACF(MA) - PACF(AR)
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
plot_acf(df2['diff_1_seasadj']) # MA(4)
plot_pacf(df2['diff_1_seasadj']) # AR(0)
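# the correlograms suggest an MA(4) term with no AR term; the model fitted below nevertheless uses p=1, i.e. ARIMA(1,2,4)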
#self-developed ARIMA
from statsmodels.tsa.arima_model import ARIMA, ARIMAResults
model = ARIMA(df2['seasadj_log'], order=(1,2,4))
model_fit = model.fit(disp=0)
print(model_fit.summary())
#ARIMA(1,2,4) best fit @ AIC = -665.884
#diagnostics
residual_array_1 = pd.DataFrame(model_fit.resid)
residual_array_1.plot()
#residuals fluctuate around 0
residual_array_1.plot(kind='kde')
print(residual_array_1.describe())
#normal distribution of residuals with mean 0
#in-sample prediction vs actual
df2['insample_prediction'] = model_fit.predict(start = '2018-01-01T00:00:00.000000000',
end = '2019-03-01T00:00:00.000000000')
df2['insample_prediction_level'] = model_fit.predict(start = '2018-01-01T00:00:00.000000000',
end = '2019-03-01T00:00:00.000000000',
typ='levels')
model_fit.plot_predict(start = '2018-01-01T00:00:00.000000000',
end = '2019-03-01T00:00:00.000000000',
alpha=0.05)
pd.concat([df2['insample_prediction'],df2['diff_1_seasadj']], axis=1).plot() #2nd differenced prediction vs actual
pd.concat([df2['insample_prediction_level'],df2['seasadj_log']], axis=1).plot() #level prediction vs actual
#performance
from statsmodels.tools.eval_measures import rmse
df2['insample_prediction_level_seas'] = df2['insample_prediction_level'].apply(lambda x: np.exp(x)) + df2['seasonal'] + df2['irregular']
pd.concat([df2['insample_prediction_level_seas'],df2['Loan_LoanWith']],axis = 1).plot()
pred_1= df2['insample_prediction'].loc['2018-01-01T00:00:00.000000000':'2019-03-01T00:00:00.000000000']
obsv_1 = df2['diff_1_seasadj'].loc['2018-01-01T00:00:00.000000000':'2019-03-01T00:00:00.000000000']
rmse(pred_1,obsv_1) #0.0014153190808289856
pred_2 = df2['insample_prediction_level'].loc['2018-01-01T00:00:00.000000000':'2019-03-01T00:00:00.000000000']
obsv_2 = df2['seasadj_log'].loc['2018-01-01T00:00:00.000000000':'2019-03-01T00:00:00.000000000']
rmse(pred_2,obsv_2) #0.0014153190808288993
pred_3 = df2['insample_prediction_level_seas'].loc['2018-01-01T00:00:00.000000000':'2019-03-01T00:00:00.000000000']
obsv_3 = df2['Loan_LoanWith'].loc['2018-01-01T00:00:00.000000000':'2019-03-01T00:00:00.000000000']
rmse(pred_3,obsv_3)
#out-sample forecast
model_fit.plot_predict(start = '2019-04-01T00:00:00.000000000',
end = '2020-03-01T00:00:00.000000000',
plot_insample=False)
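# typ='levels' returns the forecast on the scale of seasadj_log itself rather than on the twice-differenced scale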
os_prediction = model_fit.predict(start = '2019-04-01T00:00:00.000000000',
end = '2020-03-01T00:00:00.000000000',
typ = 'levels')
os_prediction_df = pd.DataFrame(os_prediction, columns=['outsample_prediction_level']).apply(lambda x: np.exp(x))
df3 = pd.concat([os_prediction_df, df2], axis=1)
df3['seasonal'].loc['2019-04-01T00:00:00.000000000':'2020-03-01T00:00:00.000000000'] = df2['seasonal'].loc['2018-04-01T00:00:00.000000000':'2019-03-01T00:00:00.000000000'].values #repeat seasonal values
df3['irregular'].loc['2019-04-01T00:00:00.000000000':'2020-03-01T00:00:00.000000000'] = df2['irregular'].loc['2018-04-01T00:00:00.000000000':'2019-03-01T00:00:00.000000000'].values #repeat irregular values
df3['final_fcst'] = df3['outsample_prediction_level'] + df3['seasonal'] + df3['irregular']
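# final forecast = level forecast (already back-transformed with exp above) plus the seasonal and
# irregular components repeated from the last observed year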
pd.concat([df3['final_fcst'],df3['Loan_LoanWith']], axis =1).plot()
df3.to_csv('C:/Users/ashkrelja/Documents/Wall_Street_Lending/Technology/Analytics/Operations_Analytics/2019/output.csv')
#Challenger Model
from pyramid.arima import auto_arima
stepwise_model = auto_arima(df2['seasadj_log'],
start_p = 1,
start_q = 1,
max_p = 10,
max_q = 10,
m = 12,
seasonal = False,
trace = True,
d = 2,
suppress_warnings = True,
stepwise = True,
with_intercept = False)
stepwise_model.summary()
#ARIMA(1,1,1) best fit @ AIC = 161.994
#diagnostic tests
residual_array = stepwise_model.resid()
residuals = pd.DataFrame(residual_array)
import pandas as pd
import re
import sys
from pymicruler.utils import util
from pymicruler.bp.BlockProcessor import BlockProcessor as BP
from pymicruler.bp.BlockInterpreter import BlockInterpreter as BI
from pymicruler.bp.NoteAnalysis import NoteAnalysis as NA
from pymicruler.bp.TaxonomyHandler import TaxonomyHandler as TH
from pymicruler.bp.ResourceCompiler import ResourceCompiler as RC
class EucastParser:
def __init__(self):
self.ires = pd.read_csv(util.Path.IRES.value)
self.note_analyser = NA()
self.b_i = BI()
self.r_c = RC()
self.all_sheets = pd.DataFrame()
self.table = pd.DataFrame()
        self.guidelines = pd.DataFrame()