import pandas as pd
from os.path import join
import networkx as nx
import socket
import sys
from scseirx.model_school import SEIRX_school
from scseirx import analysis_functions as af
def compose_agents(measures, simulation_params):
'''
Utility function to compose agent dictionaries as expected by the simulation
model as input from the dictionary of prevention measures.
Parameters
----------
measures : dictionary
Dictionary of prevention measures. Needs to include the fields
(student, teacher, family_member)_screen_interval and _mask.
simulation_params : dictionary
Dictionary of simulation parameters. Needs to include the
(student, family_member)_index_probability fields.
Returns
-------
agent_types : dictionary of dictionaries
Dictionary containing the fields "screening_interval",
"index_probability" and "mask" for the agent groups "student", "teacher"
and "family_member".
'''
agent_types = {
'student':{
'screening_interval':measures['student_screen_interval'],
'index_probability':simulation_params['student_index_probability'],
'mask':measures['student_mask'],
'voluntary_testing_rate':1},
'teacher':{
'screening_interval': measures['teacher_screen_interval'],
'index_probability': simulation_params['student_index_probability'],
'mask':measures['teacher_mask'],
'voluntary_testing_rate':1},
'family_member':{
'screening_interval':measures['family_member_screen_interval'],
'index_probability':simulation_params['family_member_index_probability'],
'mask':measures['family_member_mask'],
'voluntary_testing_rate':1}
}
return agent_types
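# Illustrative usage sketch (the concrete keys and values below are assumptions,
# not taken from the project's calibration files):
#
#   measures = {'student_screen_interval': 7, 'teacher_screen_interval': 7,
#               'family_member_screen_interval': None, 'student_mask': False,
#               'teacher_mask': False, 'family_member_mask': False}
#   simulation_params = {'student_index_probability': 0.01,
#                        'family_member_index_probability': 0.01}
#   agent_types = compose_agents(measures, simulation_params)
#   # agent_types['student'] == {'screening_interval': 7, 'index_probability': 0.01,
#   #                            'mask': False, 'voluntary_testing_rate': 1}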
def run_model(G, agent_types, measures, simulation_params, index_case,
base_transmission_risk_multiplier=1.0, seed=None, N_steps=1000):
'''
Runs a simulation with an SEIRX_school model
(see https://pypi.org/project/scseirx/1.3.0/), given a set of parameters
which are calibrated.
Parameters:
-----------
G : networkx Graph
Contact network of the given school.
agent_types : dict
Dictionary of dictionaries, holding agent-specific information for each
agent group.
measures : dictionary
Dictionary listing all prevention measures in place for the given
scenario. Fields that are not specifically included in this dictionary
will revert to SEIRX_school defaults.
simulation_params : dictionary
Dictionary holding simulation parameters such as "verbosity" and
"base_transmission_risk". Fields that are not included will revert back
to SEIRX_school defaults.
index_case : string
Agent group from which the index case is drawn. Can be "student" or
"teacher".
seed : integer
Seed for the simulation to fix randomness.
N_steps : integer
Number of maximum steps per run. This is a very conservatively chosen
value that ensures that an outbreak will always terminate within the
allotted time. Most runs terminate much earlier anyway, as soon as
the outbreak is over.
Returns
-------
model : SEIRX_school model instance holding a completed simulation run and
all associated data.
'''
# initialize the model
model = SEIRX_school(G,
simulation_params['verbosity'],
base_transmission_risk = \
simulation_params['base_transmission_risk'] * \
base_transmission_risk_multiplier,
testing = measures['testing'],
exposure_duration = simulation_params['exposure_duration'],
time_until_symptoms = simulation_params['time_until_symptoms'],
infection_duration = simulation_params['infection_duration'],
quarantine_duration = measures['quarantine_duration'],
subclinical_modifier = simulation_params['subclinical_modifier'],
infection_risk_contact_type_weights = \
simulation_params['infection_risk_contact_type_weights'],
K1_contact_types = measures['K1_contact_types'],
diagnostic_test_type = measures['diagnostic_test_type'],
preventive_screening_test_type = \
measures['preventive_screening_test_type'],
follow_up_testing_interval = \
measures['follow_up_testing_interval'],
liberating_testing = measures['liberating_testing'],
index_case = index_case,
agent_types = agent_types,
age_transmission_risk_discount = \
simulation_params['age_transmission_discount'],
age_symptom_modification = simulation_params['age_symptom_discount'],
mask_filter_efficiency = simulation_params['mask_filter_efficiency'],
transmission_risk_ventilation_modifier = \
measures['transmission_risk_ventilation_modifier'],
seed=seed)
# run the model until the outbreak is over
for i in range(N_steps):
# break if first outbreak is over
if len([a for a in model.schedule.agents if \
(a.exposed == True or a.infectious == True)]) == 0:
break
model.step()
return model
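# Illustrative usage sketch (the network file name is an assumption; `measures`
# and `simulation_params` must contain every key accessed inside run_model):
#
#   G = nx.readwrite.gpickle.read_gpickle('primary_classes-8_students-19_network.bz2')
#   agent_types = compose_agents(measures, simulation_params)
#   model = run_model(G, agent_types, measures, simulation_params,
#                     index_case='student', seed=42)
#   # `model` now holds the finished simulation run and all associated data.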
def run_ensemble(N_runs, school_type, measures, simulation_params,
school_characteristics, contact_network_src, res_path, index_case,
ttype='same_day_antigen', s_screen_interval=None,
t_screen_interval=None, student_mask=False,
teacher_mask=False, ventilation_mod=1.0,
s_testing_rate=1.0, t_testing_rate=1.0, f_testing_rate=1.0,
base_transmission_risk_multiplier=1.0,
mask_efficiency_exhale=0.5, mask_efficiency_inhale=0.7,
class_size_reduction=0.0, friendship_ratio=0.0,
student_vaccination_ratio=0.0, teacher_vaccination_ratio=0.0,
family_member_vaccination_ratio=0.0, age_transmission_discount=-0.005,
contact_weight=0.3):
'''
Utility function to run an ensemble of simulations for a given school type
and parameter combination.
Parameters:
----------
N_runs : integer
Number of individual simulation runs in the ensemble.
school_type : string
School type for which the ensemble is run. This affects the selected
school characteristics and ratio of index cases between students and
teachers. Can be "primary", "primary_dc", "lower_secondary",
"lower_secondary_dc", "upper_secondary", "secondary" or "secondary_dc".
measures : dictionary
Dictionary listing all prevention measures in place for the given
scenario. Fields that are not specifically included in this dictionary
will revert to SEIRX_school defaults.
simulation_params : dictionary
Dictionary holding simulation parameters such as "verbosity" and
"base_transmission_risk". Fields that are not included will revert back
to SEIRX_school defaults.
school_characteristics : dictionary
Dictionary holding the characteristics of each possible school type.
Needs to include the fields "classes" and "students" (i.e. the number
of students per class). The number of teachers is calculated
automatically from the given school type and number of classes.
res_path : string
Path to the directory in which results will be saved.
contact_network_src : string
Absolute or relative path pointing to the location of the contact
network used for the calibration runs. The location needs to hold the
contact networks for each school type in a sub-folder with the same
name as the school type. Networks need to be saved in networkx's .bz2
format.
index_case : string
Agent group from which the index case is drawn. Can be "student" or
"teacher".
ttype : string
Test type used for preventive screening. For example "same_day_antigen"
s_screen_interval : integer
Interval between preventive screens in the student agent group.
t_screen_interval : integer
Interval between preventive screens in the teacher agent group.
student_mask : bool
Whether or not students wear masks.
teacher_mask : bool
Whether or not teachers wear masks.
class_size_reduction : float
Fraction of students removed from each class to model reduced class
sizes (e.g. 0.5 corresponds to half classes). 0 = no reduction.
ventilation_mod : float
Modification to the transmission risk due to ventilation.
1 = no modification.
Returns:
--------
ensemble_results : pandas DataFrame
Data Frame holding the observable of interest of the ensemble, namely
the number of infected students and teachers.
'''
characteristics = school_characteristics[school_type]
# create the agent dictionaries based on the given parameter values and
# prevention measures
agent_types = compose_agents(measures, simulation_params)
agent_types['student']['screening_interval'] = s_screen_interval
agent_types['teacher']['screening_interval'] = t_screen_interval
agent_types['student']['mask'] = student_mask
agent_types['teacher']['mask'] = teacher_mask
agent_types['student']['voluntary_testing_rate'] = s_testing_rate
agent_types['teacher']['voluntary_testing_rate'] = t_testing_rate
agent_types['family_member']['voluntary_testing_rate'] = f_testing_rate
agent_types['student']['vaccination_ratio'] = student_vaccination_ratio
agent_types['teacher']['vaccination_ratio'] = teacher_vaccination_ratio
agent_types['family_member']['vaccination_ratio'] = \
family_member_vaccination_ratio
simulation_params['mask_filter_efficiency']['exhale'] = \
mask_efficiency_exhale
simulation_params['mask_filter_efficiency']['inhale'] = \
mask_efficiency_inhale
simulation_params['age_transmission_discount']['slope'] = \
age_transmission_discount
simulation_params['infection_risk_contact_type_weights']['far'] \
= contact_weight
simulation_params['infection_risk_contact_type_weights']['intermediate'] \
= contact_weight
measures['preventive_screening_test_type'] = ttype
measures['transmission_risk_ventilation_modifier'] = ventilation_mod
sname = '{}_classes-{}_students-{}'.format(school_type,
characteristics['classes'], characteristics['students'])
school_src = join(contact_network_src, school_type)
# load the contact network, schedule and node_list corresponding to the school
if (class_size_reduction == 0.0) and (friendship_ratio == 0.0):
G = nx.readwrite.gpickle.read_gpickle(\
join(school_src, '{}_network.bz2'.format(sname)))
elif class_size_reduction == 0.5 and (friendship_ratio == 0.0):
# note: there are two versions of the school contact networks with half
# of the students removed, because of how the storage of the contact
# networks for the sensitivity analysis is structured. We therefore
# sequentially try to load two different contact networks, because the
# school_src path might be different
try:
G = nx.readwrite.gpickle.read_gpickle(\
join(school_src, '{}_network_half.bz2'.format(sname)))
except FileNotFoundError:
G = nx.readwrite.gpickle.read_gpickle(\
join(school_src , '{}_removed-{}_network.bz2'\
.format(sname, class_size_reduction)))
elif class_size_reduction not in [0.0, 0.5] and (friendship_ratio == 0.0):
G = nx.readwrite.gpickle.read_gpickle(\
join(school_src , '{}_removed-{}_network.bz2'\
.format(sname, class_size_reduction)))
elif (class_size_reduction == 0) and (friendship_ratio != 0):
try:
G = nx.readwrite.gpickle.read_gpickle(\
join(school_src , '{}_friends-{}_network.bz2'\
.format(sname, friendship_ratio)))
except FileNotFoundError:
G = nx.readwrite.gpickle.read_gpickle(\
join(school_src , '{}_removed-{}_friends-{}_network.bz2'\
.format(sname, class_size_reduction, friendship_ratio)))
elif (class_size_reduction == 0.5) and (friendship_ratio != 0):
G = nx.readwrite.gpickle.read_gpickle(\
join(school_src , '{}_friends-{}_network_half.bz2'\
.format(sname, friendship_ratio)))
elif (class_size_reduction == 0.3) and (friendship_ratio == 0.2):
G = nx.readwrite.gpickle.read_gpickle(\
join(school_src , '{}_removed-{}_friends-{}_network.bz2'\
.format(sname, class_size_reduction, friendship_ratio)))
else:
print('combination of class_size_reduction and friendship_ratio ' +\
'not supported, aborting!')
return
turnovers = {'same':0, 'one':1, 'two':2, 'three':3}
bmap = {True:'T', False:'F'}
turnover, _, test = ttype.split('_')
turnover = turnovers[turnover]
# construct the filename and file path from the parameter values
measure_string = '{}_test-{}_turnover-{}_index-{}_tf-{}_sf-{}_tmask-{}'\
.format(school_type, test, turnover, index_case[0], t_screen_interval,
s_screen_interval, bmap[teacher_mask]) +\
'_smask-{}_vent-{}'\
.format(bmap[student_mask], ventilation_mod) +\
'_stestrate-{}_ttestrate-{}_trisk-{}_meffexh-{}_meffinh-{}'\
.format(s_testing_rate, t_testing_rate,
base_transmission_risk_multiplier, mask_efficiency_exhale,
mask_efficiency_inhale) +\
'_csizered-{}_fratio-{}_svacc-{}_tvacc-{}_fvacc-{}'\
.format(class_size_reduction, friendship_ratio,
student_vaccination_ratio, teacher_vaccination_ratio,
family_member_vaccination_ratio) +\
'_atd-{}_cw-{}'\
.format(age_transmission_discount, contact_weight)
spath_ensmbl = join(res_path, school_type)
# run all simulations in one ensemble (parameter combination) on one core
ensmbl_results = pd.DataFrame()
import seaborn as sns
import pandas as pd
from collections import defaultdict
from matplotlib import colors
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import zscore
import scanpy as sc
#______ UTILS________
def reorder_from_labels(labels, index):
# order based on labels:
clust_to_sample = defaultdict(list)
for i,s in enumerate(index):
clust_to_sample[labels[i]] += [s]
new_order = []
for clust,samps in clust_to_sample.items():
new_order += samps
return new_order
def reorder_from_multiple_labels(labels_df,index,labels_order):
clust_to_sample = defaultdict(list)
cur_label = labels_order[0]
labels_order = labels_order[1:]
for i,s in enumerate(index):
clust_to_sample[labels_df.loc[s,cur_label]] += [s]
new_order = []
# impose an order on the samples
clusts = sorted(clust_to_sample.keys())
for clust in clusts:
samps = clust_to_sample[clust]
if len(labels_order) == 0: # base case, just reordering on one label
new_order += samps
else:
new_order += reorder_from_multiple_labels(labels_df, samps,labels_order)
return new_order
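# Minimal sketch of the reordering helpers (labels and index values are made up):
#
#   reorder_from_labels(['B', 'A', 'B'], ['cell0', 'cell1', 'cell2'])
#   # -> ['cell0', 'cell2', 'cell1']  (samples grouped by their label)
#   # reorder_from_multiple_labels takes a DataFrame of labels indexed by sample
#   # and a list of label columns, e.g. ['donor', 'leiden'], and nests the grouping.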
def order_labels(df, colname, correct_order):
order = []
for v in correct_order:
if v in df[colname].values:
order+=[v]
print(order)
return df.set_index(colname).loc[order]
def make_proportions_df(adata, x_value, color_value, hue):
tmp = adata.obs.groupby([x_value,color_value])[color_value].count().unstack(color_value).fillna(0)
m=tmp.divide(tmp.sum(axis=1), axis=0)
props = []
i=0
for sample in m.index:
for celltype in m.columns:
vals = [sample,m.loc[sample,celltype],celltype,adata.obs.loc[adata.obs[x_value]==sample,hue].unique()[0]]
props.append(vals)
i+=1
props_df = pd.DataFrame(props,columns=[x_value,x_value+"_proportion",color_value,hue])
props_df[hue]=props_df[hue].astype("category")
return props_df
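# Illustrative sketch (the obs column names 'sample', 'celltype' and 'treatment'
# are assumptions; any categorical obs columns can be used):
#
#   props_df = make_proportions_df(adata, x_value='sample',
#                                  color_value='celltype', hue='treatment')
#   # one row per (sample, celltype) pair with that sample's cell-type proportion
#   # plus the sample-level hue value.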
def qcplots(gran_adata, groupby="leiden", gs4=None,fig=None, donor_colname = "M.Number",sample_colname="sample",include_stackedbars=True):
import matplotlib.gridspec as gridspec
from matplotlib import ticker
if gs4 is None:
if include_stackedbars:
fig=plt.figure(figsize=(7,15))
gs4 = gridspec.GridSpec(6,1)
else:
fig=plt.figure(figsize=(7,11))
gs4 = gridspec.GridSpec(4,1)
ax_tc = fig.add_subplot(gs4[0, 0])
#else:
#gs4 = ax.get_subplotspec()
#ax_tc=ax
sc.pl.violin(gran_adata, "total_counts",groupby=groupby,rotation=90,ax=ax_tc,show=False,stripplot=False)
ax_tc.set_xlabel("")
ax_tc.set_xticklabels([])
ax_tc.set_xticks([])
ax_tc.set_ylabel("n_UMI")
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1,1))
ax_tc.yaxis.set_major_formatter(formatter)
ax_mito = fig.add_subplot(gs4[1, 0])
sc.pl.violin(gran_adata, "percent_mito",groupby=groupby,rotation=90,ax=ax_mito,show=False, stripplot=False)
ax_mito.set_xlabel("")
ax_mito.set_xticklabels([])
ax_mito.set_xticks([])
ax_mito.set_ylabel("%mito")
ax_genes = fig.add_subplot(gs4[2, 0])
sc.pl.violin(gran_adata, "n_genes_by_counts",groupby=groupby,rotation=90,ax=ax_genes,show=False, stripplot=False)
ax_genes.set_xlabel("")
ax_genes.set_xticklabels([])
ax_genes.set_xticks([])
ax_genes.set_ylabel("n_genes")
formatter_g = ticker.ScalarFormatter(useMathText=True)
formatter_g.set_scientific(True)
formatter_g.set_powerlimits((-1,1))
ax_genes.yaxis.set_major_formatter(formatter_g)
ax_doublet = fig.add_subplot(gs4[3, 0])
sc.pl.violin(gran_adata, "doublet_scores",groupby=groupby,rotation=90,ax=ax_doublet,show=False, stripplot=False)
ax_doublet.set_ylabel("doublet\nscores")
if include_stackedbars:
ax_doublet.set_xlabel("")
ax_doublet.set_xticklabels([])
ax_doublet.set_xticks([])
ax_sample = fig.add_subplot(gs4[4, 0])
normalized_stacked_bar_plot(gran_adata, groupby,sample_colname,ax=ax_sample,legend=False)
ax_sample.set_xlabel("")
ax_sample.set_xticklabels([])
ax_sample.set_xticks([])
ax_sample.set_ylabel("pct cells")
ax_monkey = fig.add_subplot(gs4[5, 0])
normalized_stacked_bar_plot(gran_adata, groupby,donor_colname,ax=ax_monkey,legend=False)
ax_monkey.set_ylabel("pct cells")
def draw_clustergraph(adata_all_epi, reslist,y_vals=[],orders=[]):
'''
plots cluster membership graph from anndata object
Prior to running this, clustering must be run at each resolution you are interested in.
You must also have colors saved in adata.uns[<cluster key>_colors] for each resolution (you can force scanpy to do this by plotting a umap with color=<cluster key>)
Also, each cluster resolution column in obs must be of type categorical (this also happens when you plot it)
Requires networkx to run
Inputs:
adata - an adata object that meets the requirements listed above
reslist - a list, in order, of the cluster keys for the clustering resolutions of interest (example:["leiden_res0.5","leiden_res0.6","leiden_res0.7"])
this list can also include other cell clusterings, for example labels that came from merging leiden clusters, etc.
y_vals - a dictionary mapping each value in reslist to its height in the figure. Defaults to plotting them 2 points apart
orders - a dictionary mapping each value in reslist to a list specifying the order along the x axis of the categories in that resolution, defaults to a random or alphabetical order
'''
import networkx as nx
# first set up spacings and orderings
if len(y_vals) == 0: # set the y values of each resolution
y_vals = dict(zip(reslist,[i*2 for i,_ in enumerate(reslist)]))
if len(orders) ==0: # chooses an order of nodes for each resolution if not provided
orders = {}
for r in reslist:
orders[r] = [str(i) for i in adata_all_epi.obs[r].unique()]
# space nodes at each resolution along the full x axis
# get max number of clusters
lens = [len(l) for o,l in orders.items()]
maxwidth = max(lens)
x_vals = {}
# calculate the x value for each node so they are spaced along full x axis
for o,l in orders.items():
w = len(l)
spacing = maxwidth/(w*1.0)
x_vals[o] = {v:(i*spacing) for i,v in enumerate(l)}
# calculate edges for each consecutive pair of resolutions in reslist
respairs = [(reslist[i],reslist[i+1]) for i in range(len(reslist)-1)]
edges = []
for res1,res2 in respairs:
# edge weights come from the proportion of cells from each cluster in the top resolution
# that are assigned to each cluster in the next resolution
# if no cells map between a pair of clusters, there is no edge
layer_1_counts = adata_all_epi.obs.groupby([res1,res2]).count()["sample"].unstack()
layer_1_props = layer_1_counts.divide(layer_1_counts.sum(axis=1),axis=0)
edgelist = layer_1_props.stack().reset_index()
# so each node gets a unique name, name nodes <resolution>.<name>
edgelist["top"]=[res1+"."+i for i in edgelist[res1]]
edgelist["bottom"]=[res2+"."+i for i in edgelist[res2]]
# edges are saved as (sender node, receiver node, weight)
edges+=[(edgelist.loc[i,"top"],edgelist.loc[i,"bottom"],edgelist.loc[i,0]) for i in edgelist.index]
# initialize graph and add edges
G = nx.DiGraph()
G.add_weighted_edges_from(edges)
# set graph aesthetics for all nodes
all_sizes = {}
all_colors = {}
all_labels = {}
size_mult = 1000 # sizes need a multiplier to make the nodes visible
for r in reslist:
# node sizes are proportional to the proportion of cells in each cluster at that resolution
pcts = adata_all_epi.obs.groupby(r).count()["sample"]/adata_all_epi.obs.groupby(r).count()["sample"].sum()
d = pcts.to_dict()
d_update = {r+"."+str(i):size_mult*d[i] for i in d.keys()}
all_sizes.update(d_update)
# colors come from the adata colors
colors = dict(zip(adata_all_epi.obs[r].cat.categories,adata_all_epi.uns[r+"_colors"]))
col_update = {r+"."+str(i):colors[i] for i in colors.keys()}
all_colors.update(col_update)
# reset the labels so they match the values in obs instead of the <resolution>.<name> node names we used to make node names unique
all_labels.update({r+"."+str(i):i for i in d.keys()})
# set up position of each node depending on the values calculated above for orders and y_vals
pos = {}
for i in G.nodes():
for r in reslist:
if r in i:
res=r
pos[i] = [x_vals[res][i.split(res)[1][1:]],y_vals[res]]
# plot the graph
#
#print(min([G[u][v]['weight'] for u,v in G.edges]))
#print(max([G[u][v]['weight'] for u,v in G.edges]))
nx.draw(G,pos=pos,width= [2*G[u][v]['weight'] for u,v in G.edges],edge_vmin=-1,edge_color= [G[u][v]['weight'] for u,v in G.edges],node_color=[all_colors[i] for i in G.nodes],node_size=[all_sizes[i] for i in G.nodes],labels=all_labels,edge_cmap=plt.cm.Greys)
for r in reslist:
plt.text(-5,y_vals[r],r)
return G, all_colors,all_sizes,all_labels
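# Illustrative usage sketch (the resolution keys are assumptions; each key must be
# a categorical obs column with matching colors stored in adata.uns):
#
#   reslist = ['leiden_res0.5', 'leiden_res0.7', 'leiden_res1.0']
#   G, node_colors, node_sizes, node_labels = draw_clustergraph(adata, reslist)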
def draw_clustergraph_from_df(adata_df, reslist,y_vals=[],orders=[]):
'''
LOTS OF REPEATED CODE FROM draw_clustergraph
plots cluster membership graph from dataframe object
Prior to running this, clustering must be run at each resolution you are interested in.
Unlike draw_clustergraph, node colors here come from a hard-coded palette, so no colors need to be stored in adata.uns.
Also, each cluster resolution column must be of type categorical (this also happens when you plot it).
Requires networkx to run
Inputs:
adata_df - equivalent to the obs DataFrame of an adata object that meets the requirements listed above
reslist - a list, in order, of the cluster keys for the clustering resolutions of interest (example:["leiden_res0.5","leiden_res0.6","leiden_res0.7"])
this list can also include other cell clusterings, for example labels that came from merging leiden clusters, etc.
y_vals - a dictionary mapping each value in reslist to its height in the figure. Defaults to plotting them 2 points apart
orders - a dictionary mapping each value in reslist to a list specifying the order along the x axis of the categories in that resolution, defaults to a random or alphabetical order
'''
import networkx as nx
# first set up spacings and orderings
if len(y_vals) == 0: # set the y values of each resolution
y_vals = dict(zip(reslist,[i*2 for i,_ in enumerate(reslist)]))
if len(orders) ==0: # chooses an order of nodes for each resolution if not provided
orders = {}
for r in reslist:
orders[r] = [str(i) for i in adata_df[r].unique()]
# space nodes at each resolution along the full x axis
# get max number of clusters
lens = [len(l) for o,l in orders.items()]
maxwidth = max(lens)
x_vals = {}
# calculate the x value for each node so they are spaced along full x axis
for o,l in orders.items():
w = len(l)
spacing = maxwidth/(w*1.0)
x_vals[o] = {v:(i*spacing) for i,v in enumerate(l)}
# calculate edges for each consecutive pair of resolutions in reslist
respairs = [(reslist[i],reslist[i+1]) for i in range(len(reslist)-1)]
edges = []
for res1,res2 in respairs:
# edge weights come from the proportion of cells from each cluster in the top resolution
# that are assigned to each cluster in the next resolution
# if no cells map between a pair of clusters, there is no edge
layer_1_counts = adata_df.groupby([res1,res2]).count()['time_post_partum_weeks'].unstack()
layer_1_props = layer_1_counts.divide(layer_1_counts.sum(axis=1),axis=0)
edgelist = layer_1_props.stack().reset_index()
# so each node gets a unique name, name nodes <resolution>.<name>
edgelist["top"]=[res1+"."+i for i in edgelist[res1]]
edgelist["bottom"]=[res2+"."+i for i in edgelist[res2]]
# edges are saved as (sender node, receiver node, weight)
edges+=[(edgelist.loc[i,"top"],edgelist.loc[i,"bottom"],edgelist.loc[i,0]) for i in edgelist.index]
# initialize graph and add edges
G = nx.DiGraph()
G.add_weighted_edges_from(edges)
# set graph aesthetics for all nodes
all_sizes = {}
all_colors = {}
all_labels = {}
size_mult = 1000 # sizes need a multiplier to make the nodes visible
for r in reslist:
# node sizes are proportional to the proportion of cells in each cluster at that resolution
pcts = adata_df.groupby(r).count()['time_post_partum_weeks']/adata_df.groupby(r).count()['time_post_partum_weeks'].sum()
d = pcts.to_dict()
d_update = {r+"."+str(i):size_mult*d[i] for i in d.keys()}
all_sizes.update(d_update)
# colors come from the adata colors
colors = dict(zip(adata_df[r].cat.categories,["#EDF8E9","#BAE4B3","#74C476","#31A354","#006D2C","#02421C","#FFFFFF"]))
col_update = {r+"."+str(i):colors[i] for i in colors.keys()}
all_colors.update(col_update)
# reset the labels so they match the values in obs instead of the <resolution>.<name> node names we used to make node names unique
all_labels.update({r+"."+str(i):i for i in d.keys()})
# set up position of each node depending on the values calculated above for orders and y_vals
pos = {}
for i in G.nodes():
for r in reslist:
if r in i:
res=r
pos[i] = [x_vals[res][i.split(res)[1][1:]],y_vals[res]]
# plot the graph
#
#print(min([G[u][v]['weight'] for u,v in G.edges]))
#print(max([G[u][v]['weight'] for u,v in G.edges]))
nx.draw(G,pos=pos,width= [2*G[u][v]['weight'] for u,v in G.edges],edge_vmin=-1,edge_color= [G[u][v]['weight'] for u,v in G.edges],node_color=[all_colors[i] for i in G.nodes],node_size=[all_sizes[i] for i in G.nodes],labels=all_labels,edge_cmap=plt.cm.Greys)
for r in reslist:
plt.text(-1,y_vals[r],r)
return G, all_colors,all_sizes,all_labels
#________ STACKED BAR PLOTS____________
def normalized_stacked_bar_plot(adata, x_value, color_value, palette=None, legend=True,ax=None,x_order=None,color_order=None, log=False):
if color_value+"_colors" in adata.uns:
color_dict = dict(zip(adata.obs[color_value].cat.categories,adata.uns[color_value+"_colors"]))
if color_order is not None:
palette = colors.ListedColormap([color_dict[c] for c in color_order])
else:
palette = colors.ListedColormap(adata.uns[color_value+"_colors"])
if x_value=="milk stage":
df = order_labels(adata.obs, x_value, ["early", "transitional","transitional ","mature","late"])
else:
df = adata.obs
tmp = df.groupby([x_value,color_value])[color_value].count().unstack(color_value).fillna(0)
if x_order is not None:
tmp = tmp.loc[x_order]
normed_tmp = tmp.divide(tmp.sum(axis=1), axis=0)
if log == True:
normed_tmp = -1.0*np.log10(normed_tmp)
normed_tmp = normed_tmp.replace(np.inf, 0)
print(normed_tmp)
if color_order is not None:
#normed_tmp.columns = pd.CategoricalIndex(color_order, ordered=True, categories=color_order)
#normed_tmp = normed_tmp.sort_index(axis=1)
normed_tmp = normed_tmp[color_order]
if ax is not None:
normed_tmp.plot(kind='bar',stacked=True, colormap=palette, ax=ax)
else:
ax =normed_tmp.plot(kind='bar',stacked=True, figsize=(5,7), colormap=palette)
plt.ylabel("proportion of cells")
if legend:
plt.legend(loc='upper center', bbox_to_anchor=(1.45, 0.8))
else:
ax.legend().set_visible(False)
if log == False:
ax.set_ylim(0,1.1)
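# Illustrative sketch (obs column names are assumptions):
#
#   fig, ax = plt.subplots(figsize=(5, 7))
#   normalized_stacked_bar_plot(adata, x_value='sample', color_value='celltype', ax=ax)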
def stacked_bar_plot(adata,x_value,color_value, palette=None, legend=True, ax=None):
if color_value+"_colors" in adata.uns:
palette = colors.ListedColormap(adata.uns[color_value+"_colors"])
#TODO: allow for coloring based on colors stored in adata
#if x_value=="milk stage":
# df = order_labels(adata.obs, x_value, ["early", "transitional ","transitional","mature","late"])
#else:
df = adata.obs
if ax is not None:
df.groupby([x_value,color_value])[color_value].count().unstack(color_value).fillna(0).plot(kind='bar',stacked=True,colormap=palette, ax=ax)
else:
ax=df.groupby([x_value,color_value])[color_value].count().unstack(color_value).fillna(0).plot(kind='bar',stacked=True,colormap=palette)
plt.ylabel("n_cells")
if legend:
plt.legend(loc='upper center', bbox_to_anchor=(1.45, 0.8))
else:
ax.legend().set_visible(False)
def multiple_stacked_bar_plots(adata, plot_sep,x_value,color_value,normed=True,palette=None):
nplots = len(adata.obs[plot_sep].unique())
plot_no = 1
legend = False
for p in adata.obs[plot_sep].unique():
plt.subplot(1,nplots,plot_no)
plot_no += 1
if plot_no==nplots:
legend = True
a = adata[adata.obs[plot_sep]==p]
if normed:
normalized_stacked_bar_plot(a, x_value, color_value, palette=palette, legend=legend)
else:
stacked_bar_plot(a, x_value,color_value)
def normalized_stacked_bar_plot_from_df(df, x_value, color_value, palette=None,color_uns=None,ax=None):
if color_uns is not None:
palette = colors.ListedColormap(color_uns)
#if x_value=="milk stage":
# df = order_labels(adata.obs, x_value, ["early", "transitional","transitional ","mature","late"])
tmp = df.groupby([x_value,color_value])[color_value].count().unstack(color_value).fillna(0)
if ax is not None:
tmp.divide(tmp.sum(axis=1), axis=0).plot(kind='bar',stacked=True, ax=ax, colormap=palette)
else:
ax =tmp.divide(tmp.sum(axis=1), axis=0).plot(kind='bar',stacked=True, figsize=(5,7), colormap=palette)
ax.set_ylabel("proportion of cells")
plt.legend(loc='upper center', bbox_to_anchor=(1.45, 0.8))
ax.set_ylim(0,1.1)
def stacked_bar_with_errorbars(adata, x_value, color_value, hue,palette=None):
props_df = make_proportions_df(adata, x_value, color_value, hue)
mean_props = props_df.groupby([hue,color_value]).agg([np.mean,np.std])#.unstack(color_value)["mean"]#.plot(kind='bar',y="mean",yerr="std",stacked=True)
means = mean_props[('sample_proportion', 'mean')].unstack(color_value)
std = mean_props[('sample_proportion', 'std')].unstack(color_value)
ind = np.arange(means.shape[0])
bottom=np.zeros(means.shape[0])
plt.figure(figsize=(5,10))
if palette is None:
if hue+"_colors" in adata.uns:
palette = dict(zip(adata.obs[hue].cat.categories,adata.uns[hue+"_colors"]))
else:
palette=None
for c in means.columns:
p1 = plt.bar(ind, means[c], .5, yerr=std[c],bottom=bottom, color=palette[c])
bottom = bottom + means[c]
#____grouped by condition plots____
def grouped_dotplot(x_condition,y_condition,genes,adata,ordered_y_condition=[],within_gene_sep = .6,
between_gene_sep = .8):
tmp_obs = adata.obs
for G in genes:
tmp_obs[G] = adata.raw[:,G].X
tmp_obs[G+" on"] = adata.raw[:,G].X > 0
means = tmp_obs.groupby([x_condition,y_condition])[genes].mean().stack().reset_index()
pcts = (tmp_obs.groupby([x_condition,y_condition])[[g+" on" for g in genes]].sum()/tmp_obs.groupby([x_condition,y_condition])[[g+" on" for g in genes]].count())#.stack()
pcts.columns = genes
means["pcts"]=pcts.stack().reset_index()[0]
means.columns = [x_condition,y_condition, "gene","mean","pcts"]
#zscore the means
means["zmeans"] =means.groupby("gene").transform(lambda x: zscore(x,ddof=1))["mean"]
means["x_label_name"]= [means.loc[i,"gene"]+means.loc[i,x_condition] for i in means.index]
x_coords = []#list(range(len(genes)*len(means[x_condition].unique())))
ordered_x_condition = means[x_condition].unique()
x_labelnames = []
x_coord_value = 0
linepositions = []
gene_label_locs = []
for g in genes:
x_labelnames += [g+l for l in ordered_x_condition]
x_coords += [x_coord_value + between_gene_sep,]+ [x_coord_value+between_gene_sep + (l+1)*within_gene_sep for l in range(len(ordered_x_condition)-1)]
added_space = between_gene_sep+(within_gene_sep*(len(ordered_x_condition)-1))
gene_label_locs+=[x_coord_value + between_gene_sep+(within_gene_sep*((len(ordered_x_condition)-1)/2.0))]
x_coord_value+= added_space
linepositions += [x_coord_value + (between_gene_sep/2.0)]
x_coord_map = dict(zip(x_labelnames,x_coords))
means["xcoord"]= means["x_label_name"].map(x_coord_map)
if len(ordered_y_condition) == 0:
ordered_y_condition = means[y_condition].unique()
y_coords = range(len(ordered_y_condition))
y_coord_map =dict(zip(ordered_y_condition, y_coords))
means["ycoord"] = means[y_condition].map(y_coord_map)
figheight=len(y_coords)*.38
figwidth=len(x_coords)*.4
plt.figure(figsize=(figwidth,figheight))
ax=sns.scatterplot(data=means, x= "xcoord",y="ycoord",hue="zmeans",size="pcts",palette="Reds",sizes=(0, 250))
ax.set_xticks(x_coords)
ax.set_xticklabels(list(ordered_x_condition)*len(genes))
ax.set_yticks(y_coords)
ax.set_yticklabels(ordered_y_condition)
ax.set_xlabel("")
ax.set_ylabel("")
plt.xticks(rotation = 90)
ax.set_ylim((ax.get_ylim()[0]-.3,ax.get_ylim()[1]+.3))
#ax.set_xlim((ax.get_xlim()[0]+(between_gene_sep-within_gene_sep),ax.get_xlim()[1]-(between_gene_sep-within_gene_sep)))
for i,g in enumerate(genes):
plt.text(gene_label_locs[i],ax.get_ylim()[1]+.3,g,horizontalalignment='center',multialignment="center")
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
for xc in linepositions[:-1]:
plt.axvline(x=xc, color='grey')
def boxplot_sample_proportions(adata, x_value, color_value,hue="treatment",figsize=(10,5), plottype="box",order=None,hue_order=None,edgecolor=False,swap=False):
tmp = adata.obs.groupby([x_value,color_value])[color_value].count().unstack(color_value).fillna(0)
m=tmp.divide(tmp.sum(axis=1), axis=0)
props = []
i=0
if hue+"_colors" in adata.uns and not swap:
color_dict = dict(zip(adata.obs[hue].cat.categories,adata.uns[hue+"_colors"]))
elif color_value+"_colors" in adata.uns and swap:
color_dict = dict(zip(adata.obs[color_value].cat.categories,adata.uns[color_value+"_colors"]))
else:
color_dict=None
for sample in m.index:
for celltype in m.columns:
vals = [sample,m.loc[sample,celltype],celltype,adata.obs.loc[adata.obs[x_value]==sample,hue].unique()[0]]
props.append(vals)
i+=1
props_df = pd.DataFrame(props,columns=[x_value,x_value+"_proportion",color_value,hue])
import pandas as pd
from scipy.stats import spearmanr
import numpy as np
def find_complexes(tables_containing_list_complexes, protein_table,
feature_count_start_column, feature_count_end_column,
output_table):
tables_containing_list_complexes_df = pd.read_excel(tables_containing_list_complexes)
protein_table_df = pd.read_csv(protein_table, sep='\t')
selected_complexes = get_into_excel_complexes_table(tables_containing_list_complexes_df,
protein_table_df,
feature_count_start_column,
feature_count_end_column)
table_with_selected_complexes = modifying_first_letter(selected_complexes)
table_with_complete_complexes = correlation(table_with_selected_complexes, output_table)
finding_number_of_complexes(table_with_complete_complexes)
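# Illustrative usage sketch (file names and the intensity column range are
# assumptions; the protein table is expected to be tab-separated and to contain
# a 'Gene.names' column):
#
#   find_complexes('complexes_list.xlsx', 'proteinGroups.txt',
#                  feature_count_start_column=5, feature_count_end_column=20,
#                  output_table='complex_correlations.csv')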
def get_into_excel_complexes_table(tables_containing_list_complexes, protein_table,
feature_count_start_column,
feature_count_end_column):
output_df = pd.DataFrame()
for column_idx in tables_containing_list_complexes:
col = tables_containing_list_complexes[column_idx].dropna()
for row in range(0, col.size):
complex_name = col.name
gene_name = col.iloc[row]  # positional access; safe even if dropna left a non-contiguous index
selected_df = protein_table.loc[protein_table['Gene.names'] == gene_name]
selected_df = selected_df.iloc[:, int(feature_count_start_column): feature_count_end_column]
cols = list(selected_df)
if selected_df.empty:
# print("empty")
selected_df = pd.DataFrame().append({'complex_name': complex_name, "gene": gene_name}, ignore_index=True)
else:
selected_df['complex_name'] = complex_name
selected_df['gene'] = gene_name
cols = ['complex_name', 'gene'] + cols
selected_df = selected_df.loc[:, cols]
output_df = output_df.append(selected_df)
return output_df
def modifying_first_letter(table_with_complexes):
table_capitalized = table_with_complexes.gene.str.capitalize()
table_capitalized_last = table_capitalized.apply(lambda s: (s[:1].upper() + s[1:-1] + s[-1:].upper())[:len(s)])
table_capitalized_df = pd.Series.to_frame(table_capitalized_last)
table_concat = pd.concat([table_capitalized_df, table_with_complexes], axis=1)
return table_concat
def correlation(table_with_selected_complexes, output_table):
table_dropped = table_with_selected_complexes.iloc[:, 1:-1]
series_by_complex = {}
for index, row in table_dropped.iterrows():
complex_name = row['complex_name']
if complex_name not in series_by_complex:
series_by_complex[complex_name] = []
series_by_complex[complex_name].append(row.drop('complex_name'))
table_with_selected_complexes['rho_median'] = None
table_with_selected_complexes['rho_mean'] = None
table_with_selected_complexes['standard_deviation'] = None
for key in series_by_complex.keys():
series = series_by_complex[key]
series_df = pd.DataFrame.from_records(series)
"""PyChamberFlux I/O module containing a collection of data parsers."""
import pandas as pd
# A collection of parsers for timestamps stored in multiple columns.
# Supports only the ISO 8601 format (year-month-day).
# Does not support month-first (American) or day-first (European) format.
timestamp_parsers = {
# date only
'ymd': lambda s: pd.to_datetime(s, format='%Y %m %d'),
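# Minimal usage sketch of the parser defined above (the original module defines
# further parsers that are not shown in this excerpt):
#
#   timestamp_parsers['ymd']('2020 07 15')   # -> Timestamp('2020-07-15 00:00:00')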
from uin_fc_lib import ts_forecasts, ml_visualizations
import pandas as pd
import numpy as np
import keras as k
from keras.wrappers.scikit_learn import KerasRegressor
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from . import utils
import hashlib
from . import fc_frame
class TF_LSTM_Regressor(object):
def __init__(self, input_dim, validation_ratio=.3, look_back=1):
# fix random seed for reproducibility
self.look_back = look_back
self.validation_ratio = validation_ratio
seed = 7
np.random.seed(seed)
# evaluate model with standardized dataset
self.input_dim = input_dim
print(self.input_dim)
early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, epsilon=1e-4,
mode='min')
mcp_save = ModelCheckpoint('md.hdf5', save_best_only=True, monitor='val_loss', mode='min')
estimators = []
#estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasRegressor(
build_fn=self.baseline_model,
epochs=50,
batch_size=256,
verbose=1,
callbacks=[early_stopping, reduce_lr_loss], # , mcp_save],
validation_split=self.validation_ratio
)))
self.pipeline = Pipeline(estimators)
print('model compiled')
# convert an array of values into a dataset matrix
def baseline_model(self):
# create and fit the LSTM network
model = k.models.Sequential()
model.add(k.layers.LSTM(4, input_shape=self.input_dim))
model.add(k.layers.Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
return model
def predict(self, X):
return self.pipeline.predict(X)
def fit(self, X, y):
self.pipeline.fit(X, y)
class TF_Regressor1(object):
def __init__(self, input_dim, validation_ratio=.3):
# fix random seed for reproducibility
self.validation_ratio = validation_ratio
seed = 7
np.random.seed(seed)
# evaluate model with standardized dataset
self.input_dim = input_dim
print(self.input_dim)
early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='min')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, epsilon=1e-4,
mode='min')
mcp_save = ModelCheckpoint('md.hdf5', save_best_only=True, monitor='val_loss', mode='min')
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasRegressor(
build_fn=self.baseline_model,
epochs=50,
batch_size=128,
verbose=1,
callbacks=[early_stopping, reduce_lr_loss], # , mcp_save],
validation_split=self.validation_ratio
)))
self.pipeline = Pipeline(estimators)
def baseline_model(self):
model = k.models.Sequential()
model.add(k.layers.Dense(32, kernel_initializer='normal', input_dim=self.input_dim))
model.add(k.layers.Dropout(.2))
model.add(k.layers.Activation('relu'))
model.add(k.layers.Dense(1, kernel_initializer='normal'))
# also possible is mean_squared_error
#
model.compile(
optimizer='adam',
loss='mean_absolute_error',
)
return model
def predict(self, X):
return self.pipeline.predict(X)
def fit(self, X, y):
self.pipeline.fit(X, y)
def headline_of_X(df, hide_columns, date_column, target):
drop_cols = hide_columns
drop_cols.append(date_column)
drop_cols.append(target)
drop_cols.append('index')
unnamed_cols = df.columns[df.columns.str.startswith('Unnamed:')]
drop_cols.extend(unnamed_cols)
return df.columns[~df.columns.isin(drop_cols)]
def train_tf_regressor1_model(
df=None,
date_column=None,
backtest_settings=None,
target=None,
hide_columns=None,
validation_ratio=0
):
if backtest_settings is None:
backtest_settings = {}
input_dim = len(headline_of_X(df=df, target=target, date_column=date_column, hide_columns=hide_columns))
# subtract target
model = TF_Regressor1(input_dim=input_dim, validation_ratio=validation_ratio)
tfc = ts_forecasts.TFC(df=df, date_column=date_column)
tfc.train_model(target=target, hide_columns=hide_columns, model=model, **backtest_settings)
return tfc
def train_lstm_regressor_model(
df=None,
date_column=None,
backtest_settings=None,
target=None,
hide_columns=None,
validation_ratio=0.2,
look_back=5
):
df.dropna(inplace=True)
if backtest_settings is None:
backtest_settings = {}
if hide_columns is None:
hide_columns = []
input_dim = len(headline_of_X(df=df, target=target, date_column=date_column,
hide_columns=hide_columns))
print(input_dim)
model = TF_LSTM_Regressor(input_dim=(input_dim, look_back),
validation_ratio=validation_ratio)
lstm_columns = hide_columns
lstm_columns.append(target)
lstm_columns.pop(lstm_columns.index(date_column))
print(lstm_columns)
X = create_LSTM_dataset(df[df.columns[~df.columns.isin(hide_columns)]],
look_back=look_back,
date_column=date_column)
tfc = fc_frame.FF(df=X, date_column=date_column)
tfc.train(
target=target,
hide_columns=hide_columns,
model=model,
**backtest_settings)
y = df[target][look_back : ]
#model.fit(X, y)
return X, y, model
def get_key_for_lstm_dataset(df, date_column, look_back):
s = df.columns.__str__() + \
df[df.columns[0]].__str__() + \
df[df.columns[-1]].__str__()
s = s.encode('utf-8')
s = hashlib.sha224(s).hexdigest()
return '%s_%s_%s' % (s, date_column, look_back)
def create_LSTM_dataset(df, date_column, look_back=1):
if not df.index.name == date_column:
df.set_index(date_column, inplace=True)
rm = df.ewm(halflife=100).mean()
rstd = df.ewm(halflife=100).std()
df = (df - rm) / rstd
df.dropna(inplace=True)
key = get_key_for_lstm_dataset(df, date_column, look_back)
df_store = None
df_store = utils.see_if_in_cache(key)
if df_store is not None:
print('processed data loaded from cache')
return df_store
utils.assert_date_monotonic_increasing(df=df, date_column=date_column)
if date_column == df.index.name:
df.reset_index(inplace=True)
dataX = []
for i in range(df.shape[0] - look_back + 1):
a = df.values[i:(i + look_back), :]
dataX.append(a)
X = np.array(dataX)
X = np.reshape(X, (X.shape[0], look_back, X.shape[2]))
q = pd.DataFrame()
q[date_column] = df[date_column][:df.shape[0] - look_back + 1]
q.index.name = '__enum__'
q.reset_index(inplace=True)
for i, col in enumerate(df.columns):
if col == date_column:
continue
q[col] = q.__enum__.map(lambda num: X[num, :, i])
q.drop('__enum__', inplace=True, axis=1)
q.set_index(date_column, inplace=True)
n = q.values
# reshape the Matrix such that it matches the numpy shape
x = np.zeros((n.shape[0], n.shape[1], look_back))
for i in range(n.shape[0]):
for j in range(n.shape[1]):
x[i, j, :] = n[i, j]
utils.put_in_cache(x, key)
return x
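# Illustrative sketch (the frame below is made up; any numeric DataFrame with a
# monotonically increasing date column works, and `utils` must provide the cache
# helpers used above):
#
#   dates = pd.date_range('2020-01-01', periods=200, freq='D')
#   df = pd.DataFrame({'date': dates, 'a': np.random.rand(200), 'b': np.random.rand(200)})
#   X = create_LSTM_dataset(df, date_column='date', look_back=5)
#   # X has shape (n_samples, n_features, look_back)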
def main():
df = pd.read_csv('test_data3.csv')
#!/usr/bin/env python
"""
Script to georeference Nikon D800 images using a GPX track.
Default arguments (filepaths) may have to be edited in the main() function.
REQUIREMENT: Needs to be run on Linux right now and have exiftool installed.
"""
import datetime
import os
import subprocess
import pandas as pd
import gpxpy
# What suffix to relate raw images to
RAW_SUFFIX = "NEF"
OTHER_SUFFIXES = ["jpg", "JPG", "jpeg", "tiff", "tif"]
def check_if_valid_filename(filename: str):
"""Check if a filename corresponds to an image file."""
if len(filename.split(".")) < 2: # Equals true if file does not have a suffix, e.g.: "file", as opposed to "file.jpg"
return False
suffix = filename.split(".")[-1]
# Check if the suffix is in the list of valid suffixes
if suffix not in [RAW_SUFFIX] + OTHER_SUFFIXES:
return False
return True # If it is valid
def get_cam_times(directory: str) -> pd.Series:
"""Get the EXIF capture time from each raw image in a directory as a Pandas Series."""
# Create an empty Pandas series with datetime as its data type
# Index is image filename, date taken is data
cam_times = pd.Series(dtype="datetime64[ns]")
files = os.listdir(directory) # Get list of all files in the directory
for i, file in enumerate(files): # Loop over all files
# Check if valid image file
if not check_if_valid_filename(file):
continue
# For every 50 images, print a progress update (process may take a while)
if i % 50 == 0 and i != 0:
print(f"File {i} / {len(files)}")
# Get the full path ("image_dir/image.jpg")
full_path = os.path.join(directory, file)
# Use exiftool in a shell environment, filter out the "Create Date" and take the last entry (there are duplicates of the same one)
exiftool_output = subprocess.check_output(f"exiftool {full_path} | grep 'Create Date' | tail -1", shell=True)
# Do some string magic to extract only the date and time from the output
date = exiftool_output.decode("utf-8").split(" : ")[1].strip()
# Convert to a DateTime object and add to the series
cam_times[file] = pd.to_datetime(date, format="%Y:%m:%d %H:%M:%S.%f")
return cam_times
def get_time_diff(photo_sync_directory: str, gps_time_file: str) -> datetime.datetime:
"""Get the time difference between the GPS time and the camera's internal time by comparing photographs of waypoints."""
# Create empty Pandas Dataframe
times = pd.DataFrame(columns=["cam", "gps"], dtype="datetime64")
# Get the times from the camera and add them to the dataframe
cam_times = get_cam_times(photo_sync_directory)
times.loc[:, "cam"] = cam_times
# Open the GPS time file and add the times to the dataframe
# It is structured as: *picture filename*,*equivalent gps time*
with open(gps_time_file) as file:
for line in file.readlines():
cam, gps_time = line.split(",")
times.loc[cam, "gps"] = pd.to_datetime(gps_time, format="%Y-%m-%d %H:%M:%S")
# Get the time differences
# Type correction (.astype) may not be neccessary anymore.
times["diff"] = times["cam"] - times["gps"].astype("datetime64")
# Get the mean time offset
diff = times["diff"].mean()
# Round the diff to nearest 1/10th of a second
# The Nikon camera data is only shown to 1/10th of a second.
offset = round(diff.microseconds / 1e5) * int(1e5) - diff.microseconds
diff += pd.Timedelta(microseconds=offset)
return diff
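# Illustrative usage sketch (paths match the defaults in main() below; each line of
# the CSV maps a sync photo to the GPS time visible in it, e.g.
# "DSC_0001.NEF,2020-07-15 14:56:32"):
#
#   diff = get_time_diff('ClockSync/', 'ClockSync/gps_times.csv')
#   # `diff` is the mean camera-minus-GPS offset as a pandas Timedelta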
def read_gpx(gpx_file: str) -> pd.DataFrame:
"""Read a GPX file and return a Pandas Dataframe."""
# Create empty Pandas dataframe
coords = pd.DataFrame(columns=["lon", "lat", "elev"])
# Open the GPX file and get every track and segment within it
with open(gpx_file) as file:
gpx = gpxpy.parse(file)
for track in gpx.tracks:
for segment in track.segments:
for point in segment.points:
# Put each point in the coords dataframe
coords.loc[pd.to_datetime(point.time)] = point.longitude, point.latitude, point.elevation
# Convert index from regular datetime to nanosecond datetime
coords.index = coords.index.astype("datetime64[ns]")
return coords
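# Illustrative usage sketch (the GPX file name matches the default used in main() below):
#
#   coords = read_gpx('2020-07-15 145632.gpx')
#   # DataFrame indexed by point time with 'lon', 'lat' and 'elev' columns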
def georeference(coords: pd.DataFrame, destination_folder: str):
"""Use the GNSS coords file to apply the georeferencing to images in a folder."""
# Get every coordinate with a valid image related to it.
cam_coords = coords.dropna()
for file in os.listdir(destination_folder):
# Check if valid image file
if not check_if_valid_filename(file):
continue
# Get full path of image
full_path = os.path.join(destination_folder, file)
suffix = file.split(".")[-1]
# Loop through all image coordinates and try to match the current file with an entry
for i, coord in cam_coords.iterrows():
if file.replace(f".{suffix}", "") in coord["photo"]: # If the filename (minus the suffix) matches
cam_coords.drop(i, inplace=True) # Then remove it from the dataframe (to speed up the next loop)
break # And break the loop, thus preserving the 'coord' variable to use further down
# Use exiftool to write the location data
os.system(
f"exiftool -EXIF:GPSLongitude='{coord.lon}' -EXIF:GPSLatitude='{coord.lat}' -EXIF:GPSAltitude='{coord.elev}' -GPSLongitudeRef='East' -GPSLatitudeRef='North' -overwrite_original {full_path}")
def main(photo_sync_directory: str = "ClockSync/", gps_time_file: str = "ClockSync/gps_times.csv", destination_folder: str = "TIF", gpx_file: str = "2020-07-15 145632.gpx", csv_table_out: str = "camera_coordinates.csv"):
"""Run all functions in the correct order to georeference exported images."""
print("Calculating clock difference")
time_diff = get_time_diff(photo_sync_directory, gps_time_file)
print("Getting camera timing metadata")
destination_cam_times = get_cam_times(destination_folder)
# Switch the index and data values with each other
destination_cam_times = pd.Series(data=destination_cam_times.index.values, index=destination_cam_times.values)
# -*- coding: utf-8 -*-
# %reset -f
"""
@author: <NAME>
"""
# Demonstration of Bayesian optimization for multiple y variables
import warnings
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import norm
from sklearn import model_selection
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, DotProduct, WhiteKernel, RBF, ConstantKernel
warnings.filterwarnings('ignore')
# settings
fold_number = 10
relaxation_value = 0.01
# load datasets and settings
training_data = pd.read_csv('training_data.csv', encoding='SHIFT-JIS', index_col=0)
x_for_prediction = pd.read_csv('x_for_prediction.csv', encoding='SHIFT-JIS', index_col=0)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from pandas.api.types import is_scalar
from pandas.util._validators import validate_bool_kwarg
from pandas.core.index import _ensure_index_from_sequences
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_upcast_putmask
from pandas import compat
from pandas.compat import lzip, string_types, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
is_bool_dtype,
is_list_like,
is_numeric_dtype,
is_timedelta64_dtype)
from pandas.core.indexing import check_bool_indexer
import warnings
import numpy as np
import ray
import itertools
import io
import sys
import re
from .groupby import DataFrameGroupBy
from .utils import (
_deploy_func,
_map_partitions,
_partition_pandas_dataframe,
to_pandas,
_blocks_to_col,
_blocks_to_row,
_create_block_partitions,
_inherit_docstrings,
_reindex_helper,
_co_op_helper)
from . import get_npartitions
from .index_metadata import _IndexMetadata
@_inherit_docstrings(pd.DataFrame)
class DataFrame(object):
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False, col_partitions=None, row_partitions=None,
block_partitions=None, row_metadata=None, col_metadata=None):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index or list): The row index for this dataframe.
columns (pandas.Index): The column names for this dataframe, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input
col_partitions ([ObjectID]): The list of ObjectIDs that contain
the column dataframe partitions.
row_partitions ([ObjectID]): The list of ObjectIDs that contain the
row dataframe partitions.
block_partitions: A 2D numpy array of block partitions.
row_metadata (_IndexMetadata):
Metadata for the new dataframe's rows
col_metadata (_IndexMetadata):
Metadata for the new dataframe's columns
"""
self._row_metadata = self._col_metadata = None
# Check type of data and use appropriate constructor
if data is not None or (col_partitions is None and
row_partitions is None and
block_partitions is None):
pd_df = pd.DataFrame(data=data, index=index, columns=columns,
dtype=dtype, copy=copy)
# TODO convert _partition_pandas_dataframe to block partitioning.
row_partitions = \
_partition_pandas_dataframe(pd_df,
num_partitions=get_npartitions())
self._block_partitions = \
_create_block_partitions(row_partitions, axis=0,
length=len(pd_df.columns))
# Set in case we were only given a single row/column for below.
axis = 0
columns = pd_df.columns
index = pd_df.index
else:
# created this invariant to make sure we never have to go into the
# partitions to get the columns
assert columns is not None, \
"Columns not defined, must define columns for internal " \
"DataFrame creations"
if block_partitions is not None:
# put in numpy array here to make accesses easier since it's 2D
self._block_partitions = np.array(block_partitions)
axis = 0
else:
if row_partitions is not None:
axis = 0
partitions = row_partitions
elif col_partitions is not None:
axis = 1
partitions = col_partitions
self._block_partitions = \
_create_block_partitions(partitions, axis=axis,
length=len(columns))
if row_metadata is not None:
self._row_metadata = row_metadata.copy()
if col_metadata is not None:
self._col_metadata = col_metadata.copy()
# Sometimes we only get a single column or row, which is
# problematic for building blocks from the partitions, so we
# add whatever dimension we're missing from the input.
if self._block_partitions.ndim < 2:
self._block_partitions = np.expand_dims(self._block_partitions,
axis=axis ^ 1)
assert self._block_partitions.ndim == 2, "Block Partitions must be 2D."
# Create the row and column index objects for using our partitioning.
# If the objects haven't been inherited, then generate them
if self._row_metadata is None:
self._row_metadata = _IndexMetadata(self._block_partitions[:, 0],
index=index, axis=0)
if self._col_metadata is None:
self._col_metadata = _IndexMetadata(self._block_partitions[0, :],
index=columns, axis=1)
def _get_row_partitions(self):
return [_blocks_to_row.remote(*part)
for part in self._block_partitions]
def _set_row_partitions(self, new_row_partitions):
self._block_partitions = \
_create_block_partitions(new_row_partitions, axis=0,
length=len(self.columns))
_row_partitions = property(_get_row_partitions, _set_row_partitions)
def _get_col_partitions(self):
return [_blocks_to_col.remote(*self._block_partitions[:, i])
for i in range(self._block_partitions.shape[1])]
def _set_col_partitions(self, new_col_partitions):
self._block_partitions = \
_create_block_partitions(new_col_partitions, axis=1,
length=len(self.index))
_col_partitions = property(_get_col_partitions, _set_col_partitions)
def __str__(self):
return repr(self)
def _repr_helper_(self):
if len(self._row_metadata) <= 60 and \
len(self._col_metadata) <= 20:
return to_pandas(self)
def head(df, n, get_local_head=False):
"""Compute the head for this without creating a new DataFrame"""
if get_local_head:
return df.head(n)
new_dfs = _map_partitions(lambda df: df.head(n),
df)
index = self.index[:n]
pd_head = pd.concat(ray.get(new_dfs), axis=1, copy=False)
pd_head.index = index
pd_head.columns = self.columns
return pd_head
def tail(df, n, get_local_tail=False):
"""Compute the tail for this without creating a new DataFrame"""
if get_local_tail:
return df.tail(n)
new_dfs = _map_partitions(lambda df: df.tail(n),
df)
index = self.index[-n:]
pd_tail = pd.concat(ray.get(new_dfs), axis=1, copy=False)
pd_tail.index = index
pd_tail.columns = self.columns
return pd_tail
def front(df, n):
"""Get first n columns without creating a new Dataframe"""
cum_col_lengths = self._col_metadata._lengths.cumsum()
index = np.argmax(cum_col_lengths >= 10)
pd_front = pd.concat(ray.get(x[:index+1]), axis=1, copy=False)
pd_front = pd_front.iloc[:, :n]
pd_front.index = self.index
pd_front.columns = self.columns[:n]
return pd_front
def back(df, n):
"""Get last n columns without creating a new Dataframe"""
cum_col_lengths = np.flip(self._col_metadata._lengths,
axis=0).cumsum()
index = np.argmax(cum_col_lengths >= 10)
pd_back = pd.concat(ray.get(x[-(index+1):]), axis=1, copy=False)
pd_back = pd_back.iloc[:, -n:]
pd_back.index = self.index
pd_back.columns = self.columns[-n:]
return pd_back
x = self._col_partitions
get_local_head = False
# Get first and last 10 columns if there are more than 20 columns
if len(self._col_metadata) >= 20:
get_local_head = True
front = front(x, 10)
back = back(x, 10)
col_dots = pd.Series(["..."
for _ in range(len(self.index))])
col_dots.index = self.index
col_dots.name = "..."
x = pd.concat([front, col_dots, back], axis=1)
# If less than 60 rows, x is already in the correct format.
if len(self._row_metadata) < 60:
return x
head = head(x, 30, get_local_head)
tail = tail(x, 30, get_local_head)
# Make the dots in between the head and tail
row_dots = pd.Series(["..."
for _ in range(len(head.columns))])
row_dots.index = head.columns
row_dots.name = "..."
# We have to do it this way or convert dots to a dataframe and
# transpose. This seems better.
result = head.append(row_dots).append(tail)
return result
def __repr__(self):
# We use pandas repr so that we match them.
if len(self._row_metadata) <= 60 and \
len(self._col_metadata) <= 20:
return repr(self._repr_helper_())
# The split here is so that we don't repr pandas row lengths.
result = self._repr_helper_()
final_result = repr(result).rsplit("\n\n", maxsplit=1)[0] + \
"\n\n[{0} rows x {1} columns]".format(len(self.index),
len(self.columns))
return final_result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
if len(self._row_metadata) <= 60 and \
len(self._col_metadata) <= 20:
return self._repr_helper_()._repr_html_()
# We split so that we insert our correct dataframe dimensions.
result = self._repr_helper_()._repr_html_()
return result.split("<p>")[0] + \
"<p>{0} rows x {1} columns</p>\n</div>".format(len(self.index),
len(self.columns))
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._row_metadata.index
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._row_metadata.index = new_index
index = property(_get_index, _set_index)
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._col_metadata.index
def _set_columns(self, new_index):
"""Set the columns for this DataFrame.
Args:
new_index: The new index to set this
"""
self._col_metadata.index = new_index
columns = property(_get_columns, _set_columns)
def _arithmetic_helper(self, remote_func, axis, level=None):
# TODO: We don't support `level` right now
if level is not None:
raise NotImplementedError("Level not yet supported.")
axis = pd.DataFrame()._get_axis_number(axis) if axis is not None \
else 0
oid_series = ray.get(_map_partitions(remote_func,
self._col_partitions if axis == 0
else self._row_partitions))
if axis == 0:
# We use the index to get the internal index.
oid_series = [(oid_series[i], i) for i in range(len(oid_series))]
if len(oid_series) > 1:
for df, partition in oid_series:
this_partition = \
self._col_metadata.partition_series(partition)
df.index = \
this_partition[this_partition.isin(df.index)].index
result_series = pd.concat([obj[0] for obj in oid_series],
axis=0, copy=False)
else:
result_series = pd.concat(oid_series, axis=0, copy=False)
result_series.index = self.index
return result_series
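    # `_arithmetic_helper` is the shared reduction path: the remote function
    # runs on each column partition (axis=0) or row partition (otherwise).
    # For axis=0 the per-partition Series have their positional labels
    # remapped through the column metadata; otherwise the concatenated
    # result simply takes `self.index`.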
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
        if isinstance(expr, str) and expr == '':
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and '@' in expr:
raise NotImplementedError("Local variables not yet supported in "
"eval.")
if isinstance(expr, str) and 'not' in expr:
if 'parser' in kwargs and kwargs['parser'] == 'python':
raise NotImplementedError("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# The number of dimensions is common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ndim,
self._row_partitions[0]))
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
result = ray.get(_deploy_func.remote(lambda df: df.ftypes,
self._row_partitions[0]))
result.index = self.columns
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
# The dtypes are common across all partitions.
# The first partition will be enough.
result = ray.get(_deploy_func.remote(lambda df: df.dtypes,
self._row_partitions[0]))
result.index = self.columns
return result
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
all_empty = ray.get(_map_partitions(
lambda df: df.empty, self._row_partitions))
        return all(all_empty)
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return np.concatenate(ray.get(_map_partitions(
lambda df: df.values, self._row_partitions)))
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, row_partitions=None, col_partitions=None,
block_partitions=None, columns=None, index=None,
col_metadata=None, row_metadata=None):
"""Updates the current DataFrame inplace.
Behavior should be similar to the constructor, given the corresponding
arguments. Note that len(columns) and len(index) should match the
corresponding dimensions in the partition(s) passed in, otherwise this
function will complain.
Args:
row_partitions ([ObjectID]):
The new partitions to replace self._row_partitions directly
col_partitions ([ObjectID]):
The new partitions to replace self._col_partitions directly
columns (pd.Index):
Index of the column dimension to replace existing columns
index (pd.Index):
Index of the row dimension to replace existing index
Note:
If `columns` or `index` are not supplied, they will revert to
default columns or index respectively, as this function does
not have enough contextual info to rebuild the indexes
correctly based on the addition/subtraction of rows/columns.
"""
assert row_partitions is not None or col_partitions is not None\
or block_partitions is not None, \
"To update inplace, new column or row partitions must be set."
if block_partitions is not None:
self._block_partitions = block_partitions
elif row_partitions is not None:
self._row_partitions = row_partitions
elif col_partitions is not None:
self._col_partitions = col_partitions
if col_metadata is not None:
self._col_metadata = col_metadata
else:
assert columns is not None, \
"Columns must be passed without col_metadata"
self._col_metadata = _IndexMetadata(
self._block_partitions[0, :], index=columns, axis=1)
if row_metadata is not None:
self._row_metadata = row_metadata
else:
# Index can be None for default index, so we don't check
self._row_metadata = _IndexMetadata(
self._block_partitions[:, 0], index=index, axis=0)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(prefix) + str(x))
return DataFrame(block_partitions=self._block_partitions,
columns=new_cols,
index=self.index)
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(x) + str(suffix))
return DataFrame(block_partitions=self._block_partitions,
columns=new_cols,
index=self.index)
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError(
"\'{0}\' object is not callable".format(type(func)))
new_block_partitions = np.array([
_map_partitions(lambda df: df.applymap(func), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(block_partitions=self._block_partitions,
columns=self.columns,
index=self.index)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pd.DataFrame()._get_axis_number(axis)
if callable(by):
by = by(self.index)
elif isinstance(by, compat.string_types):
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
mismatch = len(by) != len(self) if axis == 0 \
else len(by) != len(self.columns)
if all([obj in self for obj in by]) and mismatch:
raise NotImplementedError(
"Groupby with lists of columns not yet supported.")
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
return DataFrameGroupBy(self, by, axis, level, as_index, sort,
group_keys, squeeze, **kwargs)
def sum(self, axis=None, skipna=True, level=None, numeric_only=None):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
def remote_func(df):
return df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only)
return self._arithmetic_helper(remote_func, axis, level)
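    # Illustrative usage of the reductions above (assuming `rdf` is an
    # instance of this DataFrame class and Ray has been initialized):
    #   rdf.sum(axis=0)  # pandas Series indexed by rdf.columns
    #   rdf.sum(axis=1)  # pandas Series indexed by rdf.index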
def abs(self):
"""Apply an absolute value function to all numberic columns.
Returns:
A new DataFrame with the applied absolute value.
"""
for t in self.dtypes:
if np.dtype('O') == t:
                # TODO Give a more accurate error to match Pandas
                raise TypeError("bad operand type for abs(): 'str'")
new_block_partitions = np.array([_map_partitions(lambda df: df.abs(),
block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.isin(values), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.isna(), block) for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index,
row_metadata=self._row_metadata,
col_metadata=self._col_metadata)
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.isnull(), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
# Each partition should have the same index, so we'll use 0's
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.T, block) for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions.T,
columns=self.index,
index=self.columns)
T = property(transpose)
    def dropna(self, axis, how, thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
raise NotImplementedError("Not yet")
def add(self, other, axis='columns', level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
            other: What to add to this DataFrame.
            axis: The axis to apply addition over. Only applicable to Series
                or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
return self._operator_helper(pd.DataFrame.add, other, axis, level,
fill_value)
def agg(self, func, axis=0, *args, **kwargs):
return self.aggregate(func, axis, *args, **kwargs)
def aggregate(self, func, axis=0, *args, **kwargs):
axis = pd.DataFrame()._get_axis_number(axis)
result = None
if axis == 0:
try:
result = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
kwargs.pop('is_transform', None)
return self.apply(func, axis=axis, args=args, **kwargs)
return result
def _aggregate(self, arg, *args, **kwargs):
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._string_function(arg, *args, **kwargs)
# Dictionaries have complex behavior because they can be renamed here.
elif isinstance(arg, dict):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
elif is_list_like(arg):
from .concat import concat
x = [self._aggregate(func, *args, **kwargs)
for func in arg]
new_dfs = [x[i] if not isinstance(x[i], pd.Series)
else pd.DataFrame(x[i], columns=[arg[i]]).T
for i in range(len(x))]
return concat(new_dfs)
        elif callable(arg):
            return self._callable_function(arg, _axis, *args, **kwargs)
else:
# TODO Make pandas error
raise ValueError("type {} is not callable".format(type(arg)))
def _string_function(self, func, *args, **kwargs):
assert isinstance(func, compat.string_types)
f = getattr(self, func, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
assert len(args) == 0
assert len([kwarg
for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, func, None)
if f is not None:
raise NotImplementedError("Numpy aggregates not yet supported.")
raise ValueError("{} is an unknown string function".format(func))
def _callable_function(self, func, axis, *args, **kwargs):
if axis == 0:
partitions = self._col_partitions
else:
partitions = self._row_partitions
if axis == 1:
kwargs['axis'] = axis
kwargs['temp_columns'] = self.columns
else:
kwargs['temp_index'] = self.index
def agg_helper(df, arg, *args, **kwargs):
if 'temp_index' in kwargs:
df.index = kwargs.pop('temp_index', None)
else:
df.columns = kwargs.pop('temp_columns', None)
is_transform = kwargs.pop('is_transform', False)
new_df = df.agg(arg, *args, **kwargs)
is_series = False
if isinstance(new_df, pd.Series):
is_series = True
index = None
columns = None
else:
index = new_df.index \
if not isinstance(new_df.index, pd.RangeIndex) \
else None
columns = new_df.columns
new_df.columns = pd.RangeIndex(0, len(new_df.columns))
new_df.reset_index(drop=True, inplace=True)
if is_transform:
if is_scalar(new_df) or len(new_df) != len(df):
raise ValueError("transforms cannot produce "
"aggregated results")
return is_series, new_df, index, columns
remote_result = \
[_deploy_func._submit(args=(lambda df: agg_helper(df,
func,
*args,
**kwargs),
part), num_return_vals=4)
for part in partitions]
# This magic transposes the list comprehension returned from remote
is_series, new_parts, index, columns = \
[list(t) for t in zip(*remote_result)]
# This part is because agg can allow returning a Series or a
# DataFrame, and we have to determine which here. Shouldn't add
# too much to latency in either case because the booleans can
# be returned immediately
is_series = ray.get(is_series)
if all(is_series):
new_series = pd.concat(ray.get(new_parts))
new_series.index = self.columns if axis == 0 else self.index
return new_series
# This error is thrown when some of the partitions return Series and
# others return DataFrames. We do not allow mixed returns.
elif any(is_series):
raise ValueError("no results.")
# The remaining logic executes when we have only DataFrames in the
# remote objects. We build a Ray DataFrame from the Pandas partitions.
elif axis == 0:
new_index = ray.get(index[0])
columns = ray.get(columns)
columns = columns[0].append(columns[1:])
return DataFrame(col_partitions=new_parts,
columns=columns,
index=self.index if new_index is None
else new_index)
else:
new_index = ray.get(index[0])
columns = ray.get(columns)
columns = columns[0].append(columns[1:])
return DataFrame(row_partitions=new_parts,
columns=columns,
index=self.index if new_index is None
else new_index)
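    # Each remote `agg_helper` call returns four objects (is_series flag,
    # new partition, index, columns) via `num_return_vals=4`. The flags are
    # fetched first so the driver can cheaply decide whether to assemble a
    # pandas Series or build a new distributed DataFrame from the parts.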
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def all(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
        Note:
            If axis=None or axis=0, this call applies on the column
            partitions, otherwise operates on row partitions
"""
def remote_func(df):
return df.all(axis=axis, bool_only=bool_only, skipna=skipna,
level=level, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def any(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether any elements are True over requested axis
Note:
If axis=None or axis=0, this call applies on the column partitions,
otherwise operates on row partitions
"""
def remote_func(df):
return df.any(axis=axis, bool_only=bool_only, skipna=skipna,
level=level, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def append(self, other, ignore_index=False, verify_integrity=False):
"""Append another DataFrame/list/Series to this one.
Args:
other: The object to append to this.
ignore_index: Ignore the index on appending.
verify_integrity: Verify the integrity of the index on completion.
Returns:
A new DataFrame containing the concatenated values.
"""
if isinstance(other, (pd.Series, dict)):
if isinstance(other, dict):
other = pd.Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
if other.name is None:
index = None
else:
# other must have the same index name as self, otherwise
# index name will be reset
index = pd.Index([other.name], name=self.index.name)
combined_columns = self.columns.tolist() + self.columns.union(
other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = pd.DataFrame(other.values.reshape((1, len(other))),
index=index,
columns=combined_columns)
other = other._convert(datetime=True, timedelta=True)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = pd.DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.loc[:, self.columns]
from .concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""Apply a function along input axis of DataFrame.
Args:
func: The function to apply
axis: The axis over which to apply the func.
broadcast: Whether or not to broadcast.
raw: Whether or not to convert to a Series.
reduce: Whether or not to try to apply reduction procedures.
Returns:
Series or DataFrame, depending on func.
"""
axis = pd.DataFrame()._get_axis_number(axis)
if is_list_like(func) and not all([isinstance(obj, str)
for obj in func]):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
if axis == 0 and is_list_like(func):
return self.aggregate(func, axis, *args, **kwds)
if isinstance(func, compat.string_types):
if axis == 1:
kwds['axis'] = axis
return getattr(self, func)(*args, **kwds)
elif callable(func):
return self._callable_function(func, axis=axis, *args, **kwds)
else:
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_blocks(self, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_matrix(self, columns=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asof(self, where, subset=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def assign(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def astype(self, dtype, copy=True, errors='raise', **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def at_time(self, time, asof=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='bfill')
"""
new_df = self.fillna(method='bfill',
axis=axis,
limit=limit,
downcast=downcast,
inplace=inplace)
if not inplace:
return new_df
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError("""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all().""")
else:
return to_pandas(self).bool()
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None,
**kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_lower(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_upper(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine(self, other, func, fill_value=None, overwrite=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine_first(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def compound(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def consolidate(self, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corr(self, method='pearson', min_periods=1):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corrwith(self, other, axis=0, drop=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def count(self, axis=0, level=None, numeric_only=False):
"""Get the count of non-null objects in the DataFrame.
Arguments:
axis: 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
level: If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame.
numeric_only: Include only float, int, boolean data
Returns:
The count, in a Series (or DataFrame if level is specified).
"""
def remote_func(df):
return df.count(axis=axis, level=level, numeric_only=numeric_only)
return self._arithmetic_helper(remote_func, axis, level)
def cov(self, min_periods=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def _cumulative_helper(self, func, axis):
axis = pd.DataFrame()._get_axis_number(axis) if axis is not None \
else 0
if axis == 0:
new_cols = _map_partitions(func, self._col_partitions)
return DataFrame(col_partitions=new_cols,
columns=self.columns,
index=self.index)
else:
new_rows = _map_partitions(func, self._row_partitions)
return DataFrame(row_partitions=new_rows,
columns=self.columns,
index=self.index)
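    # Cumulative operations preserve the frame's shape, so each partition
    # along the requested axis can be transformed independently with no
    # cross-partition reduction step.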
def cummax(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative maximum across the DataFrame.
Args:
axis (int): The axis to take maximum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative maximum of the DataFrame.
"""
def remote_func(df):
return df.cummax(axis=axis, skipna=skipna, *args, **kwargs)
return self._cumulative_helper(remote_func, axis)
def cummin(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative minimum across the DataFrame.
Args:
axis (int): The axis to cummin on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative minimum of the DataFrame.
"""
def remote_func(df):
return df.cummin(axis=axis, skipna=skipna, *args, **kwargs)
return self._cumulative_helper(remote_func, axis)
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative product across the DataFrame.
Args:
axis (int): The axis to take product on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative product of the DataFrame.
"""
def remote_func(df):
return df.cumprod(axis=axis, skipna=skipna, *args, **kwargs)
return self._cumulative_helper(remote_func, axis)
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
"""Perform a cumulative sum across the DataFrame.
Args:
axis (int): The axis to take sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The cumulative sum of the DataFrame.
"""
def remote_func(df):
return df.cumsum(axis=axis, skipna=skipna, *args, **kwargs)
return self._cumulative_helper(remote_func, axis)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding NaN values.
Args:
percentiles (list-like of numbers, optional):
The percentiles to include in the output.
include: White-list of data types to include in results
exclude: Black-list of data types to exclude in results
Returns: Series/DataFrame of summary statistics
"""
def describe_helper(df):
"""This to ensure nothing goes on with non-numeric columns"""
try:
return df.select_dtypes(exclude='object').describe(
percentiles=percentiles,
include=include,
exclude=exclude)
# This exception is thrown when there are only non-numeric columns
# in this partition
except ValueError:
return pd.DataFrame()
# Begin fixing index based on the columns inside.
parts = ray.get(_map_partitions(describe_helper, self._col_partitions))
# We use the index to get the internal index.
parts = [(parts[i], i) for i in range(len(parts))]
for df, partition in parts:
this_partition = self._col_metadata.partition_series(partition)
df.columns = this_partition[this_partition.isin(df.columns)].index
# Remove index from tuple
result = pd.concat([obj[0] for obj in parts], axis=1, copy=False)
return result
def diff(self, periods=1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def div(self, other, axis='columns', level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
        return self._operator_helper(pd.DataFrame.div, other, axis, level,
                                     fill_value)
def divide(self, other, axis='columns', level=None, fill_value=None):
"""Synonym for div.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self.div(other, axis, level, fill_value)
def dot(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
"""Return new object with labels in requested axis removed.
Args:
labels: Index or column labels to drop.
axis: Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns: Alternative to specifying axis (labels, axis=1 is
equivalent to columns=labels).
level: For MultiIndex
inplace: If True, do operation inplace and return None.
errors: If 'ignore', suppress error and existing labels are
dropped.
Returns:
dropped : type of caller
"""
# TODO implement level
if level is not None:
raise NotImplementedError("Level not yet supported for drop")
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and "
"'index'/'columns'")
axis = pd.DataFrame()._get_axis_name(axis)
axes = {axis: labels}
elif index is not None or columns is not None:
axes, _ = pd.DataFrame()._construct_axes_from_arguments((index,
columns),
{})
else:
raise ValueError("Need to specify at least one of 'labels', "
"'index' or 'columns'")
obj = self.copy()
def drop_helper(obj, axis, label):
# TODO(patyang): If you drop from the index first, you can do it
# in batch by returning the dropped items. Likewise coords.drop
# leaves the coords df in an inconsistent state.
if axis == 'index':
try:
coords = obj._row_metadata[label]
if isinstance(coords, pd.DataFrame):
partitions = list(coords['partition'])
indexes = list(coords['index_within_partition'])
else:
partitions, indexes = coords
partitions = [partitions]
indexes = [indexes]
for part, index in zip(partitions, indexes):
x = _deploy_func.remote(
lambda df: df.drop(labels=index, axis=axis,
errors='ignore'),
obj._row_partitions[part])
obj._row_partitions = \
[obj._row_partitions[i] if i != part
else x
for i in range(len(obj._row_partitions))]
# The decrement here is because we're dropping one at a
# time and the index is automatically updated when we
# convert back to blocks.
obj._row_metadata.squeeze(part, index)
obj._row_metadata.drop(labels=label)
except KeyError:
return obj
else:
try:
coords = obj._col_metadata[label]
if isinstance(coords, pd.DataFrame):
partitions = list(coords['partition'])
indexes = list(coords['index_within_partition'])
else:
partitions, indexes = coords
partitions = [partitions]
indexes = [indexes]
for part, index in zip(partitions, indexes):
x = _deploy_func.remote(
lambda df: df.drop(labels=index, axis=axis,
errors='ignore'),
obj._col_partitions[part])
obj._col_partitions = \
[obj._col_partitions[i] if i != part
else x
for i in range(len(obj._col_partitions))]
# The decrement here is because we're dropping one at a
# time and the index is automatically updated when we
# convert back to blocks.
obj._col_metadata.squeeze(part, index)
obj._col_metadata.drop(labels=label)
except KeyError:
return obj
return obj
for axis, labels in axes.items():
if labels is None:
continue
if is_list_like(labels):
for label in labels:
if errors != 'ignore' and label and \
label not in getattr(self, axis):
raise ValueError("The label [{}] is not in the [{}]",
label, axis)
else:
obj = drop_helper(obj, axis, label)
else:
if errors != 'ignore' and labels and \
labels not in getattr(self, axis):
raise ValueError("The label [{}] is not in the [{}]",
labels, axis)
else:
obj = drop_helper(obj, axis, labels)
if not inplace:
return obj
else:
self._row_metadata = obj._row_metadata
self._col_metadata = obj._col_metadata
self._block_partitions = obj._block_partitions
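    # `drop` removes labels one at a time: the coordinate metadata maps a
    # label to (partition, index_within_partition), only that partition is
    # rewritten remotely, and the metadata is squeezed so the remaining
    # positions stay consistent.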
def drop_duplicates(self, subset=None, keep='first', inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def duplicated(self, subset=None, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def eq(self, other, axis='columns', level=None):
"""Checks element-wise that this is equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the eq over.
level: The Multilevel index level to apply eq over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.eq, other, axis, level)
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
# TODO(kunalgosar): Implement Copartition and use to implement equals
def helper(df, index, other_series):
return df.iloc[index['index_within_partition']] \
.equals(other_series)
results = []
other_partition = None
other_df = None
# TODO: Make the appropriate coord df accessor methods for this fxn
for i, idx in other._row_metadata._coord_df.iterrows():
if idx['partition'] != other_partition:
other_df = ray.get(other._row_partitions[idx['partition']])
other_partition = idx['partition']
# TODO: group series here into full df partitions to reduce
# the number of remote calls to helper
other_series = other_df.iloc[idx['index_within_partition']]
curr_index = self._row_metadata._coord_df.iloc[i]
curr_df = self._row_partitions[int(curr_index['partition'])]
results.append(_deploy_func.remote(helper,
curr_df,
curr_index,
other_series))
for r in results:
if not ray.get(r):
return False
return True
def eval(self, expr, inplace=False, **kwargs):
"""Evaluate a Python expression as a string using various backends.
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
parser: The parser to use to construct the syntax tree from the
expression. The default of 'pandas' parses code slightly
different than standard Python. Alternatively, you can parse
an expression using the 'python' parser to retain strict
Python semantics. See the enhancing performance documentation
for more details.
engine: The engine used to evaluate the expression.
truediv: Whether to use true division, like in Python >= 3
local_dict: A dictionary of local variables, taken from locals()
by default.
global_dict: A dictionary of global variables, taken from
globals() by default.
resolvers: A list of objects implementing the __getitem__ special
method that you can use to inject an additional collection
of namespaces to use for variable lookup. For example, this is
used in the query() method to inject the index and columns
variables that refer to their respective DataFrame instance
attributes.
level: The number of prior stack frames to traverse and add to
the current scope. Most users will not need to change this
parameter.
target: This is the target object for assignment. It is used when
there is variable assignment in the expression. If so, then
target must support item assignment with string keys, and if a
copy is being returned, it must also support .copy().
inplace: If target is provided, and the expression mutates target,
whether to modify target inplace. Otherwise, return a copy of
target with the mutation.
Returns:
ndarray, numeric scalar, DataFrame, Series
"""
self._validate_eval_query(expr, **kwargs)
columns = self.columns
def eval_helper(df):
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
# If result is a series, expr was not an assignment expression.
if not isinstance(result, pd.Series):
result.columns = pd.RangeIndex(0, len(result.columns))
return result
inplace = validate_bool_kwarg(inplace, "inplace")
new_rows = _map_partitions(eval_helper, self._row_partitions)
result_type = ray.get(_deploy_func.remote(lambda df: type(df),
new_rows[0]))
if result_type is pd.Series:
new_series = pd.concat(ray.get(new_rows), axis=0)
new_series.index = self.index
return new_series
columns_copy = self._col_metadata._coord_df.copy().T
columns_copy.eval(expr, inplace=True, **kwargs)
columns = columns_copy.columns
if inplace:
self._update_inplace(row_partitions=new_rows, columns=columns)
else:
return DataFrame(columns=columns, row_partitions=new_rows)
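    # `eval` restores the real column labels inside each row partition
    # before evaluating the expression. If the expression assigns a new
    # column, the same expression is replayed on a transposed copy of the
    # column coordinate DataFrame to discover the resulting label set.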
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""Synonym for DataFrame.fillna(method='ffill')
"""
new_df = self.fillna(method='ffill',
axis=axis,
limit=limit,
downcast=downcast,
inplace=inplace)
if not inplace:
return new_df
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
"""Fill NA/NaN values using the specified method.
Args:
value: Value to use to fill holes. This value cannot be a list.
            method: Method to use for filling holes in reindexed Series.
                pad / ffill: propagate last valid observation forward to
                    next valid.
                backfill / bfill: use NEXT valid observation to fill gap.
axis: 0 or 'index', 1 or 'columns'.
inplace: If True, fill in place. Note: this will modify any other
views on this object.
limit: If method is specified, this is the maximum number of
consecutive NaN values to forward/backward fill. In other
words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method
is not specified, this is the maximum number of entries along
the entire axis where NaNs will be filled. Must be greater
than 0 if not None.
downcast: A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an
appropriate equal type.
Returns:
filled: DataFrame
"""
# TODO implement value passed as DataFrame
if isinstance(value, pd.DataFrame):
raise NotImplementedError("Passing a DataFrame as the value for "
"fillna is not yet supported.")
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = pd.DataFrame()._get_axis_number(axis) \
if axis is not None \
else 0
if isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
if value is None and method is None:
raise ValueError('must specify a fill method or value')
if value is not None and method is not None:
raise ValueError('cannot specify both a fill method and value')
if method is not None and method not in ['backfill', 'bfill', 'pad',
'ffill']:
expecting = 'pad (ffill) or backfill (bfill)'
msg = 'Invalid fill method. Expecting {expecting}. Got {method}'\
.format(expecting=expecting, method=method)
raise ValueError(msg)
if inplace:
new_obj = self
else:
new_obj = self.copy()
parts, coords_obj = (new_obj._col_partitions,
new_obj._col_metadata) if axis == 0 else \
(new_obj._row_partitions,
new_obj._row_metadata)
if isinstance(value, (pd.Series, dict)):
new_vals = {}
value = dict(value)
for val in value:
# Get the local index for the partition
try:
part, index = coords_obj[val]
# Pandas ignores these errors so we will suppress them too.
except KeyError:
continue
new_vals[val] = _deploy_func.remote(lambda df: df.fillna(
value={index: value[val]},
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs), parts[part])
# Not every partition was changed, so we put everything back that
# was not changed and update those that were.
new_parts = [parts[i] if coords_obj.index[i] not in new_vals
else new_vals[coords_obj.index[i]]
for i in range(len(parts))]
else:
new_parts = _map_partitions(lambda df: df.fillna(
value=value,
method=method,
axis=axis,
inplace=False,
limit=limit,
downcast=downcast,
**kwargs), parts)
if axis == 0:
new_obj._update_inplace(col_partitions=new_parts,
columns=self.columns,
index=self.index)
else:
new_obj._update_inplace(row_partitions=new_parts,
columns=self.columns,
index=self.index)
if not inplace:
return new_obj
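    # For scalar values or method-based fills, `fillna` maps over every
    # partition; for dict/Series values it only dispatches to the
    # partitions that own the referenced labels and reuses the rest
    # unchanged.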
def filter(self, items=None, like=None, regex=None, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first_valid_index(self):
"""Return index for first non-NA/null value.
Returns:
scalar: type of index
"""
return self._row_metadata.first_valid_index()
def floordiv(self, other, axis='columns', level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self._operator_helper(pd.DataFrame.floordiv, other, axis, level,
fill_value)
    @classmethod
    def from_csv(cls, path, header=0, sep=', ', index_col=0,
                 parse_dates=True, encoding=None, tupleize_cols=None,
                 infer_datetime_format=False):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @classmethod
    def from_dict(cls, data, orient='columns', dtype=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @classmethod
    def from_items(cls, items, columns=None, orient='columns'):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
    @classmethod
    def from_records(cls, data, index=None, exclude=None, columns=None,
                     coerce_float=False, nrows=None):
        raise NotImplementedError(
            "To contribute to Pandas on Ray, please visit "
            "github.com/ray-project/ray.")
def ge(self, other, axis='columns', level=None):
"""Checks element-wise that this is greater than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
            axis: The axis to perform the ge over.
            level: The Multilevel index level to apply ge over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.ge, other, axis, level)
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
return ray.get(_deploy_func.remote(lambda df: df.get_dtype_counts(),
self._row_partitions[0]))
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return ray.get(_deploy_func.remote(lambda df: df.get_ftype_counts(),
self._row_partitions[0]))
def get_value(self, index, col, takeable=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def get_values(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def gt(self, other, axis='columns', level=None):
"""Checks element-wise that this is greater than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the gt over.
level: The Multilevel index level to apply gt over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.gt, other, axis, level)
def head(self, n=5):
"""Get the first n rows of the dataframe.
Args:
n (int): The number of rows to return.
Returns:
A new dataframe with the first n rows of the dataframe.
"""
if n >= len(self._row_metadata):
return self.copy()
new_dfs = _map_partitions(lambda df: df.head(n),
self._col_partitions)
index = self._row_metadata.index[:n]
return DataFrame(col_partitions=new_dfs,
columns=self.columns,
index=index)
def hist(self, data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
if not all([d != np.dtype('O') for d in self.dtypes]):
raise TypeError(
"reduction operation 'argmax' not allowed for this dtype")
def remote_func(df):
return df.idxmax(axis=axis, skipna=skipna)
internal_indices = self._arithmetic_helper(remote_func, axis)
# do this to convert internal indices to correct index
return internal_indices.apply(lambda x: self.index[x])
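    # Partitions carry a positional RangeIndex internally, so the labels
    # returned by the remote idxmax are positions that have to be
    # translated back through `self.index`; idxmin below does the same.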
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
if not all([d != np.dtype('O') for d in self.dtypes]):
raise TypeError(
"reduction operation 'argmax' not allowed for this dtype")
def remote_func(df):
return df.idxmin(axis=axis, skipna=skipna)
internal_indices = self._arithmetic_helper(remote_func, axis)
# do this to convert internal indices to correct index
return internal_indices.apply(lambda x: self.index[x])
def infer_objects(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
def info_helper(df):
output_buffer = io.StringIO()
df.info(verbose=verbose,
buf=output_buffer,
max_cols=max_cols,
memory_usage=memory_usage,
null_counts=null_counts)
return output_buffer.getvalue()
# Combine the per-partition info and split into lines
result = ''.join(ray.get(_map_partitions(info_helper,
self._col_partitions)))
lines = result.split('\n')
# Class denoted in info() output
class_string = '<class \'ray.dataframe.dataframe.DataFrame\'>\n'
# Create the Index info() string by parsing self.index
index_string = self.index.summary() + '\n'
        # A column header is needed in the info() output
col_header = 'Data columns (total {0} columns):\n'.format(
len(self.columns))
# Parse the per-partition values to get the per-column details
# Find all the lines in the output that start with integers
prog = re.compile('^[0-9]+.+')
col_lines = [prog.match(line) for line in lines]
cols = [c.group(0) for c in col_lines if c is not None]
# replace the partition columns names with real column names
columns = ["{0}\t{1}\n".format(self.columns[i],
cols[i].split(" ", 1)[1])
for i in range(len(cols))]
col_string = ''.join(columns) + '\n'
# A summary of the dtypes in the dataframe
dtypes_string = "dtypes: "
for dtype, count in self.dtypes.value_counts().iteritems():
dtypes_string += "{0}({1}),".format(dtype, count)
dtypes_string = dtypes_string[:-1] + '\n'
# Compute the memory usage by summing per-partitions return values
# Parse lines for memory usage number
prog = re.compile('^memory+.+')
mems = [prog.match(line) for line in lines]
mem_vals = [float(re.search(r'\d+', m.group(0)).group())
for m in mems if m is not None]
memory_string = ""
if len(mem_vals) != 0:
# Sum memory usage from each partition
if memory_usage != 'deep':
memory_string = 'memory usage: {0}+ bytes'.format(
sum(mem_vals))
else:
memory_string = 'memory usage: {0} bytes'.format(sum(mem_vals))
# Combine all the components of the info() output
result = ''.join([class_string, index_string, col_header,
col_string, dtypes_string, memory_string])
# Write to specified output buffer
if buf:
buf.write(result)
else:
sys.stdout.write(result)
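    # `info` is reassembled on the driver: each column partition renders
    # its own info() into a buffer, the per-column lines and memory figures
    # are recovered with regexes, and the real column names, dtype counts
    # and summed memory usage are stitched into one pandas-style report.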
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
if not is_list_like(value):
value = np.full(len(self.index), value)
if len(value) != len(self.index):
raise ValueError(
"Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError(
"cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)))
if loc < 0:
raise ValueError("unbounded slice")
partition, index_within_partition = \
self._col_metadata.insert(column, loc)
# Deploy insert function to specific column partition, and replace that
# column
def insert_col_part(df):
df.insert(index_within_partition, column, value, allow_duplicates)
return df
new_obj = _deploy_func.remote(insert_col_part,
self._col_partitions[partition])
new_cols = [self._col_partitions[i]
if i != partition
else new_obj
for i in range(len(self._col_partitions))]
new_col_names = self.columns.insert(loc, column)
self._update_inplace(col_partitions=new_cols, columns=new_col_names)
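    # Only the column partition that owns the insertion point is modified;
    # the column metadata records where the new label lives so later
    # lookups resolve to the correct partition.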
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', downcast=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
            Generators can't be pickled, so from the remote function
            we expand the generator into a list before getting it.
            This is not ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
def update_iterrow(series, i):
"""Helper function to correct the columns + name of the Series."""
series.index = self.columns
series.name = list(self.index)[i]
return series
iters = ray.get([_deploy_func.remote(
lambda df: list(df.iterrows()), part)
for part in self._row_partitions])
iters = itertools.chain.from_iterable(iters)
series = map(lambda s: update_iterrow(s[1][1], s[0]), enumerate(iters))
return zip(self.index, series)
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
            Generators can't be pickled, so from the remote function
            we expand the generator into a list before getting it.
            This is not ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
iters = ray.get([_deploy_func.remote(
lambda df: list(df.items()), part)
for part in self._row_partitions])
def concat_iters(iterables):
for partitions in enumerate(zip(*iterables)):
series = pd.concat([_series for _, _series in partitions[1]])
series.index = self.index
series.name = list(self.columns)[partitions[0]]
yield (series.name, series)
return concat_iters(iters)
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name='Pandas'):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
            Generators can't be pickled, so from the remote function
            we expand the generator into a list before getting it.
            This is not ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
iters = ray.get([
_deploy_func.remote(
lambda df: list(df.itertuples(index=index, name=name)),
part) for part in self._row_partitions])
iters = itertools.chain.from_iterable(iters)
def _replace_index(row_tuple, idx):
# We need to use try-except here because
# isinstance(row_tuple, namedtuple) won't work.
try:
row_tuple = row_tuple._replace(Index=idx)
except AttributeError: # Tuple not namedtuple
row_tuple = (idx,) + row_tuple[1:]
return row_tuple
if index:
iters = itertools.starmap(_replace_index, zip(iters, self.index))
return iters
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""Join two or more DataFrames, or a DataFrame with a collection.
Args:
other: What to join this DataFrame with.
on: A column name to use from the left for the join.
how: What type of join to conduct.
lsuffix: The suffix to add to column names that match on left.
rsuffix: The suffix to add to column names that match on right.
sort: Whether or not to sort.
Returns:
The joined DataFrame.
"""
if on is not None:
raise NotImplementedError("Not yet.")
if isinstance(other, pd.Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
if on is not None:
index = self[on]
else:
index = self.index
new_index = index.join(other.index, how=how, sort=sort)
# Joining two empty DataFrames is fast, and error checks for us.
new_column_labels = pd.DataFrame(columns=self.columns) \
.join(pd.DataFrame(columns=other.columns),
lsuffix=lsuffix, rsuffix=rsuffix).columns
new_partition_num = max(len(self._block_partitions.T),
len(other._block_partitions.T))
# Join is a concat once we have shuffled the data internally.
# We shuffle the data by computing the correct order.
# Another important thing to note: We set the current self index
# to the index variable which may be 'on'.
new_self = np.array([
_reindex_helper._submit(args=tuple([index, new_index, 1,
new_partition_num] +
block.tolist()),
num_return_vals=new_partition_num)
for block in self._block_partitions.T])
new_other = np.array([
_reindex_helper._submit(args=tuple([other.index, new_index, 1,
new_partition_num] +
block.tolist()),
num_return_vals=new_partition_num)
for block in other._block_partitions.T])
# Append the blocks together (i.e. concat)
new_block_parts = np.concatenate((new_self, new_other)).T
# Default index in the case that on is set.
if on is not None:
new_index = None
# TODO join the two metadata tables for performance.
return DataFrame(block_partitions=new_block_parts,
index=new_index,
columns=new_column_labels)
else:
# This constraint carried over from Pandas.
if on is not None:
raise ValueError("Joining multiple DataFrames only supported"
" for joining on index")
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
new_index = pd.DataFrame(index=self.index).join(
[pd.DataFrame(index=obj.index) for obj in other],
how=how, sort=sort).index
new_column_labels = pd.DataFrame(columns=self.columns).join(
[pd.DataFrame(columns=obj.columns) for obj in other],
lsuffix=lsuffix, rsuffix=rsuffix).columns
new_partition_num = max([len(self._block_partitions.T)] +
[len(obj._block_partitions.T)
for obj in other])
new_self = np.array([
_reindex_helper._submit(args=tuple([self.index, new_index, 1,
new_partition_num] +
block.tolist()),
num_return_vals=new_partition_num)
for block in self._block_partitions.T])
new_others = np.array([_reindex_helper._submit(
args=tuple([obj.index, new_index, 1, new_partition_num] +
block.tolist()),
num_return_vals=new_partition_num
) for obj in other for block in obj._block_partitions.T])
# Append the columns together (i.e. concat)
new_block_parts = np.concatenate((new_self, new_others)).T
# TODO join the two metadata tables for performance.
return DataFrame(block_partitions=new_block_parts,
index=new_index,
columns=new_column_labels)
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def last(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def last_valid_index(self):
"""Return index for last non-NA/null value.
Returns:
scalar: type of index
"""
return self._row_metadata.last_valid_index()
def le(self, other, axis='columns', level=None):
"""Checks element-wise that this is less than or equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the le over.
level: The Multilevel index level to apply le over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.le, other, axis, level)
def lookup(self, row_labels, col_labels):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def lt(self, other, axis='columns', level=None):
"""Checks element-wise that this is less than other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the lt over.
level: The Multilevel index level to apply lt over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.lt, other, axis, level)
def mad(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def max(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
def remote_func(df):
return df.max(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Computes mean across the DataFrame.
Args:
axis (int): The axis to take the mean on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The mean of the DataFrame. (Pandas series)
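Example (illustrative; axis=0 averages each column, axis=1 each row):
    df = DataFrame({'a': [1, 2], 'b': [3, 4]})
    df.mean()        # Series: a -> 1.5, b -> 3.5
    df.mean(axis=1)  # Series: 0 -> 2.0, 1 -> 3.0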
"""
def remote_func(df):
return df.mean(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def median(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Computes median across the DataFrame.
Args:
axis (int): The axis to take the median on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The median of the DataFrame. (Pandas series)
"""
def remote_func(df):
return df.median(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def memory_usage(self, index=True, deep=False):
def remote_func(df):
return df.memory_usage(index=False, deep=deep)
result = self._arithmetic_helper(remote_func, axis=0)
result.index = self.columns
if index:
index_value = self._row_metadata.index.memory_usage(deep=deep)
return pd.Series(index_value, index=['Index']).append(result)
return result
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def min(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
def remote_func(df):
return df.min(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def mod(self, other, axis='columns', level=None, fill_value=None):
"""Mods this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the mod against this.
axis: The axis to mod over.
level: The Multilevel index level to apply mod over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Mod applied.
"""
return self._operator_helper(pd.DataFrame.mod, other, axis, level,
fill_value)
def mode(self, axis=0, numeric_only=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mul(self, other, axis='columns', level=None, fill_value=None):
"""Multiplies this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
return self._operator_helper(pd.DataFrame.mul, other, axis, level,
fill_value)
def multiply(self, other, axis='columns', level=None, fill_value=None):
"""Synonym for mul.
Args:
other: The object to use to apply the multiply against this.
axis: The axis to multiply over.
level: The Multilevel index level to apply multiply over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Multiply applied.
"""
return self.mul(other, axis, level, fill_value)
def ne(self, other, axis='columns', level=None):
"""Checks element-wise that this is not equal to other.
Args:
other: A DataFrame or Series or scalar to compare to.
axis: The axis to perform the ne over.
level: The Multilevel index level to apply ne over.
Returns:
A new DataFrame filled with Booleans.
"""
return self._operator_helper(pd.DataFrame.ne, other, axis, level)
def nlargest(self, n, columns, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def notna(self):
"""Perform notna across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.notna(), block) for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def notnull(self):
"""Perform notnull across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
new_block_partitions = np.array([_map_partitions(
lambda df: df.notnull(), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def nsmallest(self, n, columns, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def nunique(self, axis=0, dropna=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pipe(self, func, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pivot(self, index=None, columns=None, values=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def plot(self, x=None, y=None, kind='line', ax=None, subplots=False,
sharex=None, sharey=False, layout=None, figsize=None,
use_index=True, title=None, grid=None, legend=True, style=None,
logx=False, logy=False, loglog=False, xticks=None, yticks=None,
xlim=None, ylim=None, rot=None, fontsize=None, colormap=None,
table=False, yerr=None, xerr=None, secondary_y=False,
sort_columns=False, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
result = self[item]
del self[item]
return result
def pow(self, other, axis='columns', level=None, fill_value=None):
"""Pow this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the pow against this.
axis: The axis to pow over.
level: The Multilevel index level to apply pow over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Pow applied.
"""
return self._operator_helper(pd.DataFrame.pow, other, axis, level,
fill_value)
def prod(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def product(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
"""Return values at the given quantile over requested axis,
a la numpy.percentile.
Args:
q (float): 0 <= q <= 1, the quantile(s) to compute
axis (int): 0 or 'index' for row-wise,
1 or 'columns' for column-wise
interpolation: {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
Specifies which interpolation method to use
Returns:
quantiles : Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the
values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values
are the quantiles.
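Example (illustrative; mirrors the return types described above):
    df = DataFrame({'a': [1, 2, 3, 4], 'b': [10, 20, 30, 40]})
    df.quantile(0.5)           # Series indexed by ['a', 'b']
    df.quantile([0.25, 0.75])  # DataFrame whose index is [0.25, 0.75]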
"""
def quantile_helper(df, q, axis, numeric_only, interpolation):
try:
return df.quantile(q=q, axis=axis, numeric_only=numeric_only,
interpolation=interpolation)
except ValueError:
return pd.Series()
if isinstance(q, (pd.Series, np.ndarray, pd.Index, list)):
# In the case of a list, we build it one at a time.
# TODO Revisit for performance
quantiles = []
for q_i in q:
def remote_func(df):
return quantile_helper(df, q=q_i, axis=axis,
numeric_only=numeric_only,
interpolation=interpolation)
result = self._arithmetic_helper(remote_func, axis)
result.name = q_i
quantiles.append(result)
return pd.concat(quantiles, axis=1).T
else:
def remote_func(df):
return quantile_helper(df, q=q, axis=axis,
numeric_only=numeric_only,
interpolation=interpolation)
result = self._arithmetic_helper(remote_func, axis)
result.name = q
return result
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
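Example (illustrative; the expression references column names):
    df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    df.query('a > 1 and b < 6')  # keeps only the row where a == 2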
"""
self._validate_eval_query(expr, **kwargs)
columns = self.columns
def query_helper(df):
df = df.copy()
df.columns = columns
df.query(expr, inplace=True, **kwargs)
df.columns = pd.RangeIndex(0, len(df.columns))
return df
new_rows = _map_partitions(query_helper,
self._row_partitions)
if inplace:
self._update_inplace(row_partitions=new_rows)
else:
return DataFrame(row_partitions=new_rows, columns=self.columns)
def radd(self, other, axis='columns', level=None, fill_value=None):
return self.add(other, axis, level, fill_value)
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rdiv(self, other, axis='columns', level=None, fill_value=None):
return self._single_df_op_helper(
lambda df: df.rdiv(other, axis, level, fill_value),
other, axis, level)
def reindex(self, labels=None, index=None, columns=None, axis=None,
method=None, copy=True, level=None, fill_value=np.nan,
limit=None, tolerance=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rename(self, mapper=None, index=None, columns=None, axis=None,
copy=True, inplace=False, level=None):
"""Alters axes labels.
Args:
mapper, index, columns: Transformations to apply to the axis's
values.
axis: Axis to target with mapper.
copy: Also copy underlying data.
inplace: Whether to return a new DataFrame.
level: Only rename a specific level of a MultiIndex.
Returns:
If inplace is False, a new DataFrame with the updated axes.
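Example (illustrative; only the labels change, never the data):
    df = DataFrame({'a': [1, 2]})
    df.rename(columns={'a': 'alpha'})            # returns a renamed copy
    df.rename(index={0: 'first'}, inplace=True)  # modifies df in place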
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# We have to do this with the args because of how rename handles
# kwargs. It doesn't ignore None values passed in, so we have to filter
# them ourselves.
args = locals()
kwargs = {k: v for k, v in args.items()
if v is not None and k != "self"}
# inplace should always be true because this is just a copy, and we
# will use the results after.
kwargs['inplace'] = True
df_to_rename = pd.DataFrame(index=self.index, columns=self.columns)
df_to_rename.rename(**kwargs)
if inplace:
obj = self
else:
obj = self.copy()
obj.index = df_to_rename.index
obj.columns = df_to_rename.columns
if not inplace:
return obj
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns.name = mapper
else:
renamed.index.name = mapper
if not inplace:
return renamed
def _set_axis_name(self, name, axis=0, inplace=False):
"""Alter the name or names of the axis.
Args:
name: Name for the Index, or list of names for the MultiIndex
axis: 0 or 'index' for the index; 1 or 'columns' for the columns
inplace: Whether to modify `self` directly or return a copy
Returns:
Type of caller or None if inplace=True.
"""
axes_is_columns = axis == 1 or axis == "columns"
renamed = self if inplace else self.copy()
if axes_is_columns:
renamed.columns = renamed.columns.set_names(name)
else:
renamed.index = renamed.index.set_names(name)
if not inplace:
return renamed
def reorder_levels(self, order, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into dataframe columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
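Example (illustrative; the old index becomes a column unless drop=True):
    df = DataFrame({'a': [1, 2]}, index=['x', 'y'])
    df.reset_index()           # adds an 'index' column holding ['x', 'y']
    df.reset_index(drop=True)  # discards the old index entirely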
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, pd.PeriodIndex):
values = index.asobject.values
elif isinstance(index, pd.DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
# We're building a new default index dataframe for use later.
new_index = pd.RangeIndex(len(self))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if isinstance(self.index, pd.MultiIndex):
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, pd.MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index'
i = 0
while default in self:
default = 'level_{}'.format(i)
i += 1
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, pd.MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
def rfloordiv(self, other, axis='columns', level=None, fill_value=None):
return self._single_df_op_helper(
lambda df: df.rfloordiv(other, axis, level, fill_value),
other, axis, level)
def rmod(self, other, axis='columns', level=None, fill_value=None):
return self._single_df_op_helper(
lambda df: df.rmod(other, axis, level, fill_value),
other, axis, level)
def rmul(self, other, axis='columns', level=None, fill_value=None):
return self.mul(other, axis, level, fill_value)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, on=None, axis=0, closed=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def round(self, decimals=0, *args, **kwargs):
new_block_partitions = np.array([_map_partitions(
lambda df: df.round(decimals=decimals, *args, **kwargs), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def rpow(self, other, axis='columns', level=None, fill_value=None):
return self._single_df_op_helper(
lambda df: df.rpow(other, axis, level, fill_value),
other, axis, level)
def rsub(self, other, axis='columns', level=None, fill_value=None):
return self._single_df_op_helper(
lambda df: df.rsub(other, axis, level, fill_value),
other, axis, level)
def rtruediv(self, other, axis='columns', level=None, fill_value=None):
return self._single_df_op_helper(
lambda df: df.rtruediv(other, axis, level, fill_value),
other, axis, level)
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
"""Returns a random sample of items from an axis of object.
Args:
n: Number of items from axis to return. Cannot be used with frac.
Default = 1 if frac = None.
frac: Fraction of axis items to return. Cannot be used with n.
replace: Sample with or without replacement. Default = False.
weights: Default ‘None’ results in equal probability weighting.
If passed a Series, will align with target object on index.
Index values in weights not found in sampled object will be
ignored and index values in sampled object not in weights will
be assigned weights of zero. If called on a DataFrame, will
accept the name of a column when axis = 0. Unless weights are
a Series, weights must be same length as axis being sampled.
If weights do not sum to 1, they will be normalized to sum
to 1. Missing values in the weights column will be treated as
zero. inf and -inf values not allowed.
random_state: Seed for the random number generator (if int), or
numpy RandomState object.
axis: Axis to sample. Accepts axis number or name.
Returns:
A new DataFrame
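Example (illustrative; n and frac are mutually exclusive):
    df = DataFrame({'a': range(10), 'b': range(10)})
    df.sample(n=3, random_state=0)  # three rows, reproducible
    df.sample(frac=0.5)             # roughly half of the rows
    df.sample(n=1, axis=1)          # sample a column instead of rows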
"""
axis = pd.DataFrame()._get_axis_number(axis) if axis is not None \
else 0
if axis == 0:
axis_length = len(self._row_metadata)
else:
axis_length = len(self._col_metadata)
if weights is not None:
# Index of the weights Series should correspond to the index of the
# Dataframe in order to sample
if isinstance(weights, pd.Series):
weights = weights.reindex(self.axes[axis])
# If weights arg is a string, the weights used for sampling will
# the be values in the column corresponding to that string
if isinstance(weights, string_types):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a "
"valid column")
else:
raise ValueError("Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame")
weights = pd.Series(weights, dtype='float64')
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of "
"same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative "
"values")
# weights cannot be NaN when sampling, so we must set all nan
# values to 0
weights = weights.fillna(0)
# If passed in weights are not equal to 1, renormalize them
# otherwise numpy sampling function will error
weights_sum = weights.sum()
if weights_sum != 1:
if weights_sum != 0:
weights = weights / weights_sum
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
if n is None and frac is None:
# default to n = 1 if n and frac are both None (in accordance with
# Pandas specification)
n = 1
elif n is not None and frac is None and n % 1 != 0:
# n must be an integer
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
# compute the number of samples based on frac
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
# Pandas specification does not allow both n and frac to be passed
# in
raise ValueError('Please enter a value for `frac` OR `n`, not '
'both')
if n < 0:
raise ValueError("A negative number of rows requested. Please "
"provide positive value.")
if n == 0:
# An Empty DataFrame is returned if the number of samples is 0.
# The Empty Dataframe should have either columns or index specified
# depending on which axis is passed in.
return DataFrame(columns=[] if axis == 1 else self.columns,
index=self.index if axis == 1 else [])
if axis == 1:
axis_labels = self.columns
partition_metadata = self._col_metadata
partitions = self._col_partitions
else:
axis_labels = self.index
partition_metadata = self._row_metadata
partitions = self._row_partitions
if random_state is not None:
# Get a random number generator depending on the type of
# random_state that is passed in
if isinstance(random_state, int):
random_num_gen = np.random.RandomState(random_state)
elif isinstance(random_state, np.random.RandomState):
random_num_gen = random_state
else:
# random_state must be an int or a numpy RandomState object
raise ValueError("Please enter an `int` OR a "
"np.random.RandomState for random_state")
# choose random numbers and then get corresponding labels from
# chosen axis
sample_indices = random_num_gen.randint(
low=0,
high=len(partition_metadata),
size=n)
samples = axis_labels[sample_indices]
else:
# randomly select labels from chosen axis
samples = np.random.choice(a=axis_labels, size=n,
replace=replace, p=weights)
# create an array of (partition, index_within_partition) tuples for
# each sample
part_ind_tuples = [partition_metadata[sample]
for sample in samples]
if axis == 1:
# tup[0] refers to the partition number and tup[1] is the index
# within that partition
new_cols = [_deploy_func.remote(lambda df: df.iloc[:, [tup[1]]],
partitions[tup[0]]) for tup in part_ind_tuples]
return DataFrame(col_partitions=new_cols,
columns=samples,
index=self.index)
else:
new_rows = [_deploy_func.remote(lambda df: df.loc[[tup[1]]],
partitions[tup[0]]) for tup in part_ind_tuples]
return DataFrame(row_partitions=new_rows,
columns=self.columns,
index=samples)
def select(self, crit, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def select_dtypes(self, include=None, exclude=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sem(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def set_axis(self, labels, axis=0, inplace=None):
"""Assign desired index to given axis.
Args:
labels (pd.Index or list-like): The Index to assign.
axis (string or int): The axis to reassign.
inplace (bool): Whether to make these modifications inplace.
Returns:
If inplace is False, returns a new DataFrame, otherwise None.
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and \"labels\" as second, is still supported '
'but will be deprecated in a future version of pandas.',
FutureWarning, stacklevel=2)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
'set_axis currently defaults to operating inplace.\nThis '
'will change in a future version of pandas, use '
'inplace=True to avoid this warning.',
FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, pd.DataFrame()._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""Set the DataFrame index using one or more existing columns.
Args:
keys: column label or list of column labels / arrays.
drop (boolean): Delete columns to be used as the new index.
append (boolean): Whether to append columns to existing index.
inplace (boolean): Modify the DataFrame in place.
verify_integrity (boolean): Check the new index for duplicates.
Otherwise defer the check until necessary. Setting to False
will improve the performance of this method
Returns:
If inplace is set to False, returns a new DataFrame; otherwise None.
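Example (illustrative; drop controls whether the key column is kept):
    df = DataFrame({'key': ['x', 'y'], 'val': [1, 2]})
    df.set_index('key')              # 'key' becomes the index
    df.set_index('key', drop=False)  # index set, column also kept
    df.set_index(['key', 'val'])     # builds a MultiIndex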
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, pd.MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, pd.MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, pd.Series):
level = col._values
names.append(col.name)
elif isinstance(col, pd.Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, pd.Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = _ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def set_value(self, index, col, value, takeable=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def shift(self, periods=1, freq=None, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def skew(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def slice_shift(self, periods=1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def squeeze(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def stack(self, level=-1, dropna=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def std(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
"""Computes standard deviation across the DataFrame.
Args:
axis (int): The axis to take the std on.
skipna (bool): True to skip NA values, false otherwise.
ddof (int): degrees of freedom
Returns:
The std of the DataFrame (Pandas Series)
"""
def remote_func(df):
return df.std(axis=axis, skipna=skipna, level=level, ddof=ddof,
numeric_only=numeric_only, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def sub(self, other, axis='columns', level=None, fill_value=None):
"""Subtract a DataFrame/Series/scalar from this DataFrame.
Args:
other: The object to use to apply the subtraction to this.
axis: The axis to apply the subtraction over.
level: The Multilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraction applied.
"""
return self._operator_helper(pd.DataFrame.sub, other, axis, level,
fill_value)
def subtract(self, other, axis='columns', level=None, fill_value=None):
"""Alias for sub.
Args:
other: The object to use to apply the subtraction to this.
axis: The axis to apply the subtraction over.
level: The Multilevel index level to subtract over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the subtraction applied.
"""
return self.sub(other, axis, level, fill_value)
def swapaxes(self, axis1, axis2, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def swaplevel(self, i=-2, j=-1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tail(self, n=5):
"""Get the last n rows of the dataframe.
Args:
n (int): The number of rows to return.
Returns:
A new dataframe with the last n rows of this dataframe.
"""
if n >= len(self._row_metadata):
return self
new_dfs = _map_partitions(lambda df: df.tail(n),
self._col_partitions)
index = self._row_metadata.index[-n:]
return DataFrame(col_partitions=new_dfs,
columns=self.columns,
index=index)
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_clipboard(self, excel=None, sep=None, **kwargs):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_clipboard(excel, sep, **kwargs)
def to_csv(self, path_or_buf=None, sep=',', na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_csv(path_or_buf, sep, na_rep, float_format,
columns, header, index, index_label,
mode, encoding, compression, quoting,
quotechar, line_terminator, chunksize,
tupleize_cols, date_format, doublequote,
escapechar, decimal)
def to_dense(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_dict(self, orient='dict', into=dict):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_excel(excel_writer, sheet_name, na_rep,
float_format, columns, header, index,
index_label, startrow, startcol, engine,
merge_cells, encoding, inf_rep, verbose,
freeze_panes)
def to_feather(self, fname):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_feather(fname)
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail',
private_key=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_hdf(self, path_or_buf, key, **kwargs):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_hdf(path_or_buf, key, **kwargs)
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, bold_rows=True, classes=None, escape=True,
max_rows=None, max_cols=None, show_dimensions=False,
notebook=False, decimal='.', border=None):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_html(buf, columns, col_space, header,
index, na_rep, formatters,
float_format, sparsify, index_names,
justify, bold_rows, classes, escape,
max_rows, max_cols, show_dimensions,
notebook, decimal, border)
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression=None):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_json(path_or_buf, orient, date_format,
double_precision, force_ascii, date_unit,
default_handler, lines, compression)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None,
escape=None, encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_msgpack(path_or_buf, encoding, **kwargs)
def to_panel(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_parquet(self, fname, engine='auto', compression='snappy',
**kwargs):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_parquet(fname, engine, compression, **kwargs)
def to_period(self, freq=None, axis=0, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_pickle(self, path, compression='infer',
protocol=pkl.HIGHEST_PROTOCOL):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_pickle(path, compression, protocol)
def to_records(self, index=True, convert_datetime64=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_sparse(self, fill_value=None, kind='block'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_sql(name, con, flavor, schema, if_exists,
index, index_label, chunksize, dtype)
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding='latin-1', byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
warnings.warn("Defaulting to Pandas implementation",
PendingDeprecationWarning)
port_frame = to_pandas(self)
port_frame.to_stata(fname, convert_dates, write_index,
encoding, byteorder, time_stamp,
data_label, variable_labels)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_xarray(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def transform(self, func, *args, **kwargs):
kwargs["is_transform"] = True
result = self.agg(func, *args, **kwargs)
try:
result.columns = self.columns
result.index = self.index
except ValueError:
raise ValueError("transforms cannot produce aggregated results")
return result
def truediv(self, other, axis='columns', level=None, fill_value=None):
"""Divides this DataFrame against another DataFrame/Series/scalar.
Args:
other: The object to use to apply the divide against this.
axis: The axis to divide over.
level: The Multilevel index level to apply divide over.
fill_value: The value to fill NaNs with.
Returns:
A new DataFrame with the Divide applied.
"""
return self._operator_helper(pd.DataFrame.truediv, other, axis, level,
fill_value)
def truncate(self, before=None, after=None, axis=None, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tshift(self, periods=1, freq=None, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tz_convert(self, tz, axis=0, level=None, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def unstack(self, level=-1, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def var(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
"""Computes variance across the DataFrame.
Args:
axis (int): The axis to take the variance on.
skipna (bool): True to skip NA values, false otherwise.
ddof (int): degrees of freedom
Returns:
The variance of the DataFrame.
"""
def remote_func(df):
return df.var(axis=axis, skipna=skipna, level=level, ddof=ddof,
numeric_only=numeric_only, **kwargs)
return self._arithmetic_helper(remote_func, axis, level)
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def xs(self, key, axis=0, level=None, drop_level=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __getitem__(self, key):
"""Get the column specified by key for this DataFrame.
Args:
key : The column name.
Returns:
A Pandas Series representing the value for the column.
"""
key = com._apply_if_callable(key, self)
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, pd.MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except (KeyError, ValueError, TypeError):
pass
# see if we can slice the rows
indexer = self._row_metadata.convert_to_index_sliceable(key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (pd.Series, np.ndarray, pd.Index, list)):
return self._getitem_array(key)
elif isinstance(key, DataFrame):
raise NotImplementedError("To contribute to Pandas on Ray, please"
"visit github.com/ray-project/ray.")
# return self._getitem_frame(key)
elif is_mi_columns:
raise NotImplementedError("To contribute to Pandas on Ray, please"
"visit github.com/ray-project/ray.")
# return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
# may result in multiple columns?
partition = self._col_metadata[key, 'partition']
result = ray.get(self._getitem_indiv_col(key, partition))
result.name = key
result.index = self.index
return result
def _getitem_array(self, key):
if com.is_bool_indexer(key):
if isinstance(key, pd.Series) and \
not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning, stacklevel=3)
elif len(key) != len(self.index):
raise ValueError('Item wrong length {} instead of {}.'.format(
len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
new_parts = _map_partitions(lambda df: df[key],
self._col_partitions)
columns = self.columns
index = self.index[key]
return DataFrame(col_partitions=new_parts,
columns=columns,
index=index)
else:
columns = self._col_metadata[key].index
indices_for_rows = [self.columns.get_loc(new_col)
for new_col in columns]
new_parts = [_deploy_func.remote(
lambda df: df.__getitem__(indices_for_rows),
part) for part in self._row_partitions]
index = self.index
return DataFrame(row_partitions=new_parts,
columns=columns,
index=index)
def _getitem_indiv_col(self, key, part):
loc = self._col_metadata[key]
if isinstance(loc, pd.Series):
index = loc[loc['partition'] == part]
else:
index = loc[loc['partition'] == part]['index_within_partition']
return _deploy_func.remote(
lambda df: df.__getitem__(index),
self._col_partitions[part])
def _getitem_slice(self, key):
new_cols = _map_partitions(lambda df: df[key],
self._col_partitions)
index = self.index[key]
return DataFrame(col_partitions=new_cols,
index=index,
columns=self.columns)
def __getattr__(self, key):
"""After regular attribute access, looks up the name in the columns
Args:
key (str): Attribute name.
Returns:
The value of the attribute.
"""
try:
return object.__getattribute__(self, key)
except AttributeError as e:
if key in self.columns:
return self[key]
raise e
def __setitem__(self, key, value):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __len__(self):
"""Gets the length of the dataframe.
Returns:
Returns an integer length of the dataframe object.
"""
return len(self._row_metadata)
def __unicode__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __invert__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __hash__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __iter__(self):
"""Iterate over the columns
Returns:
An Iterator over the columns of the dataframe.
"""
return iter(self.columns)
def __contains__(self, key):
"""Searches columns for specific key
Args:
key : The column name
Returns:
Returns a boolean if the specified key exists as a column name
"""
return self.columns.__contains__(key)
def __nonzero__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __bool__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __abs__(self):
"""Creates a modified DataFrame by taking the absolute value.
Returns:
A modified DataFrame
"""
return self.abs()
def __round__(self, decimals=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __array__(self, dtype=None):
# TODO: This is very inefficient and needs fix
return np.array(to_pandas(self))
def __array_wrap__(self, result, context=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __getstate__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __setstate__(self, state):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __delitem__(self, key):
"""Delete a column by key. `del a[key]` for example.
Operation happens in place.
Note: This operation happens on the row and column partitions
simultaneously. No rebuild is performed.
Args:
key: key to delete
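Example (illustrative):
    df = DataFrame({'a': [1, 2], 'b': [3, 4]})
    del df['a']  # df now holds only column 'b'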
"""
# Create helper method for deleting column(s) in row partition.
def del_helper(df, to_delete):
cols = df.columns[to_delete] # either int or an array of ints
if not is_list_like(cols):
cols = [cols]
for col in cols:
df.__delitem__(col)
# Reset the column index to conserve space
df.columns = pd.RangeIndex(0, len(df.columns))
return df
to_delete = self.columns.get_loc(key)
self._row_partitions = _map_partitions(
del_helper, self._row_partitions, to_delete)
# This structure is used to get the correct index inside the partition.
del_df = self._col_metadata[key]
# We need to standardize between multiple and single occurrences in the
# columns. Putting single occurrences in a pd.DataFrame and transposing
# results in the same structure as multiple with 'loc'.
if isinstance(del_df, pd.Series):
del_df = pd.DataFrame(del_df).T
# Cast cols as pd.Series as duplicate columns mean result may be
# np.int64 or pd.Series
col_parts_to_del = \
pd.Series(self._col_metadata[key, 'partition']).unique()
self._col_metadata.drop(key)
for i in col_parts_to_del:
# Compute the correct index inside the partition to delete.
to_delete_in_partition = \
del_df[del_df['partition'] == i]['index_within_partition']
self._col_partitions[i] = _deploy_func.remote(
del_helper, self._col_partitions[i], to_delete_in_partition)
self._col_metadata.reset_partition_coords(col_parts_to_del)
def __finalize__(self, other, method=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __copy__(self, deep=True):
"""Make a copy using Ray.DataFrame.copy method
Args:
deep: Boolean, deep copy or not.
Currently we do not support deep copy.
Returns:
A Ray DataFrame object.
"""
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
"""Make a -deep- copy using Ray.DataFrame.copy method
This is equivalent to copy(deep=True).
Args:
memo: No effect. Just to comply with Pandas API.
Returns:
A Ray DataFrame object.
"""
return self.copy(deep=True)
def __and__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __or__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __xor__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __lt__(self, other):
return self.lt(other)
def __le__(self, other):
return self.le(other)
def __gt__(self, other):
return self.gt(other)
def __ge__(self, other):
return self.ge(other)
def __eq__(self, other):
return self.eq(other)
def __ne__(self, other):
return self.ne(other)
def __add__(self, other):
return self.add(other)
def __iadd__(self, other):
return self.add(other)
def __radd__(self, other, axis="columns", level=None, fill_value=None):
return self.radd(other, axis, level, fill_value)
def __mul__(self, other):
return self.mul(other)
def __imul__(self, other):
return self.mul(other)
def __rmul__(self, other, axis="columns", level=None, fill_value=None):
return self.rmul(other, axis, level, fill_value)
def __pow__(self, other):
return self.pow(other)
def __ipow__(self, other):
return self.pow(other)
def __rpow__(self, other, axis="columns", level=None, fill_value=None):
return self.rpow(other, axis, level, fill_value)
def __sub__(self, other):
return self.sub(other)
def __isub__(self, other):
return self.sub(other)
def __rsub__(self, other, axis="columns", level=None, fill_value=None):
return self.rsub(other, axis, level, fill_value)
def __floordiv__(self, other):
return self.floordiv(other)
def __ifloordiv__(self, other):
return self.floordiv(other)
def __rfloordiv__(self, other, axis="columns", level=None,
fill_value=None):
return self.rfloordiv(other, axis, level, fill_value)
def __truediv__(self, other):
return self.truediv(other)
def __itruediv__(self, other):
return self.truediv(other)
def __rtruediv__(self, other, axis="columns", level=None, fill_value=None):
return self.rtruediv(other, axis, level, fill_value)
def __mod__(self, other):
return self.mod(other)
def __imod__(self, other):
return self.mod(other)
def __rmod__(self, other, axis="columns", level=None, fill_value=None):
return self.rmod(other, axis, level, fill_value)
def __div__(self, other, axis="columns", level=None, fill_value=None):
return self.div(other, axis, level, fill_value)
def __rdiv__(self, other, axis="columns", level=None, fill_value=None):
return self.rdiv(other, axis, level, fill_value)
def __neg__(self):
"""Computes an element wise negative DataFrame
Returns:
A modified DataFrame where every element is the negation of before
"""
for t in self.dtypes:
if not (is_bool_dtype(t)
or is_numeric_dtype(t)
or is_timedelta64_dtype(t)):
raise TypeError("Unary negative expects numeric dtype, not {}"
.format(t))
new_block_partitions = np.array([_map_partitions(
lambda df: df.__neg__(), block)
for block in self._block_partitions])
return DataFrame(block_partitions=new_block_partitions,
columns=self.columns,
index=self.index)
def __sizeof__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def __doc__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def blocks(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def style(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def iat(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def loc(self):
"""Purely label-location based indexer for selection by label.
We currently support: single label, list array, slice object
We do not support: boolean array, callable
"""
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def is_copy(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def at(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ix(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def iloc(self):
"""Purely integer-location based indexing for selection by position.
We currently support: single label, list array, slice object
We do not support: boolean array, callable
"""
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def _copartition(self, other, new_index):
"""Colocates the values of other with this for certain operations.
NOTE: This method uses the indexes of each DataFrame to order them the
same. This operation does an implicit shuffling of data and zips
the two DataFrames together to be operated on.
Args:
other: The other DataFrame to copartition with.
Returns:
Two new sets of partitions, copartitioned and zipped.
"""
# Put in the object store so they aren't serialized each iteration.
old_self_index = ray.put(self.index)
new_index = ray.put(new_index)
old_other_index = ray.put(other.index)
new_num_partitions = max(len(self._block_partitions.T),
len(other._block_partitions.T))
new_partitions_self = \
np.array([_reindex_helper._submit(
args=tuple([old_self_index, new_index, 1,
new_num_partitions] + block.tolist()),
num_return_vals=new_num_partitions)
for block in self._block_partitions.T]).T
new_partitions_other = \
np.array([_reindex_helper._submit(
args=tuple([old_other_index, new_index, 1,
new_num_partitions] + block.tolist()),
num_return_vals=new_num_partitions)
for block in other._block_partitions.T]).T
return zip(new_partitions_self, new_partitions_other)
def _operator_helper(self, func, other, axis, level, *args):
"""Helper method for inter-dataframe and scalar operations"""
if isinstance(other, DataFrame):
return self._inter_df_op_helper(
lambda x, y: func(x, y, axis, level, *args),
other, axis, level)
else:
return self._single_df_op_helper(
lambda df: func(df, other, axis, level, *args),
other, axis, level)
def _inter_df_op_helper(self, func, other, axis, level):
if level is not None:
raise NotImplementedError("Mutlilevel index not yet supported "
"in Pandas on Ray")
axis = pd.DataFrame()._get_axis_number(axis)
# Adding two DataFrames causes an outer join.
if isinstance(other, DataFrame):
new_column_index = self.columns.join(other.columns, how="outer")
new_index = self.index.join(other.index, how="outer")
copartitions = self._copartition(other, new_index)
new_blocks = \
np.array([_co_op_helper._submit(
args=tuple([func, self.columns, other.columns,
len(part[0])] +
np.concatenate(part).tolist()),
num_return_vals=len(part[0]))
for part in copartitions])
# TODO join the Index Metadata objects together for performance.
return DataFrame(block_partitions=new_blocks,
columns=new_column_index,
index=new_index)
def _single_df_op_helper(self, func, other, axis, level):
if level is not None:
raise NotImplementedError("Multilevel index not yet supported "
"in Pandas on Ray")
        axis = pd.DataFrame()._get_axis_number(axis)
# coding: utf-8
# * Build baseline model
# * Example of leaky variables:
# * Missing Data
# * Example of New categorical variables
# * Features not available in production, only in training
# * Outliers
# * Blacklist variables
#
#
# * Example of overfitting
# * Multi-collinearity of variables in linear & NN models
# * Variance Analysis
# * Feature Engineering transformation compute time
# ## Define Business Objective
#
# Help an airline carrier & its passengers know whether a flight will be delayed 24 hours in advance.
#
# ## Define Success
#
# Manual or automated decision making system?
#
# How to represent expected output (True: flight delayed / False: flight not delayed)?
#
# ## Define Cost of Errors (Bad Prediction)?
#
# 
# ## Airline On-Time Performance Data Dictionary
#
# |Column | Description | Type |Questions/Comments |
# |:-:|---|---|---|
# | Year | year of the flight | Integer | |
# | Month | month of the flight | Integer | |
# | DayofMonth | day of the month (1 to 31) | Integer | |
# | DayOfWeek | day of the week | Integer | |
# | DepTime | actual departure time | Float | Is this available 24 hours prior to departure (i.e. time of prediction)? |
# | CRSDepTime | scheduled departure time | Integer | Is this available 24 hours prior to departure (i.e. time of prediction)? |
# | ArrTime | actual arrival time | Float | Is this info available during time of prediction? |
# | CRSArrTime | scheduled arrival time | Integer | Is this info available during time of prediction? How likely is it to change? |
# | UniqueCarrier | carrier ID | String | Why would this matter? |
# | FlightNum | flight number | Integer | How are flight numbers assigned? |
# | TailNum | plane's tail number | String | How are tail numbers assigned & why would that matter? What happens if this plane is decommissioned? |
# | ActualElapsedTime | actual elapsed time of the flight, in minutes | Float | Is this info available during time of prediction? What happens if we include this variable in the model? |
# | CRSElapsedTime | scheduled elapsed time of the flight, in minutes | Float | Is this info available during time of prediction? How likely is it to change? |
# | AirTime | airborne time for the flight, in minutes | Float | Is this info available during time of prediction? |
# | ArrDelay | arrival delay, in minutes | Float | Is this info available during time of prediction? |
# | DepDelay | departure delay, in minutes | Float | Is this info available during time of prediction? |
# | Origin | originating airport | String | How likely is this to change? |
# | Dest | destination airport | String | How likely is this to change? |
# | Distance | flight distance | Float | How likely is this to change? |
# | TaxiIn | taxi time from wheels down to arrival at the gate, in minutes | Float | Is this info available during time of prediction? |
# | TaxiOut | taxi time from departure from the gate to wheels up, in minutes | Float | Is this info available during time of prediction? |
# | Cancelled | cancellation status (stored as logical). | Integer | Should we bother predicting whether flight is delayed or not for a cancelled flight? |
# | CancellationCode | cancellation code, if applicable | String | Should we bother predicting whether flight is delayed or not for a cancelled flight? |
# | Diverted | diversion status | Integer | Is this info available during time of prediction? |
# | CarrierDelay | delay, in minutes, attributable to the carrier | Float | |
# | WeatherDelay | delay, in minutes, attributable to weather factors | Float | Weather predictions are available 24 hour in advance. Will you still include this variable if the model is expected run 48 hours instead of 24 hours in advance? How about if model expected to run 4 hours instead of 24 hours in advance? |
# | NASDelay | delay, in minutes, attributable to the National Aviation System | Float | How far in advance do we know about national aviation delays? Consult domain expert. |
# | SecurityDelay | delay, in minutes, attributable to security factors | Float | How far in advance do we know about security delays? Consult domain expert. |
# | LateAircraftDelay | delay, in minutes, attributable to late-arriving aircraft | Float | How far in advance do we know about late-arriving aircraft delays? Consult domain expert. |
# | IsArrDelayed | represents whether flight arrival was delayed or not | String | How was this generated? How is "delayed" defined (in terms of minutes)? Should you trust this? |
# | IsDepDelayed | represents whether flight departure was delayed or not | String | How was this generated? How is "delayed" defined (in terms of minutes)? Should you trust this? |
#
#
# *note*: Determine which time zone times are represented in: local (PST, CT, EST) or universal (UTC)? If not universal, we'll have to normalize times to a universal standard; a minimal sketch follows.
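# The snippet below is a minimal, self-contained sketch of that normalization. The
# `OriginTz` column is hypothetical: this dataset does not ship time-zone information,
# so in practice it would have to be joined in from an airport reference table.
import pandas as pd

_tz_toy = pd.DataFrame({"Year": [2008], "Month": [1], "DayofMonth": [3],
                        "CRSDepTime": [1955], "OriginTz": ["America/New_York"]})
_parts = _tz_toy.rename(columns={"Year": "year", "Month": "month", "DayofMonth": "day"})
_local_dep = (pd.to_datetime(_parts[["year", "month", "day"]])
              + pd.to_timedelta(_tz_toy["CRSDepTime"] // 100, unit="h")
              + pd.to_timedelta(_tz_toy["CRSDepTime"] % 100, unit="m"))
_tz_toy["CRSDepTimeUTC"] = [ts.tz_localize(tz).tz_convert("UTC")
                            for ts, tz in zip(_local_dep, _tz_toy["OriginTz"])]
print(_tz_toy[["CRSDepTime", "CRSDepTimeUTC"]])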
# ### Variables Not to be used for training an ML model
# Not all variables available in the dataset should be used during training. Here is a list of questions to help you figure out which variables to exclude when training for production.
#
#
# 1. Is the variable available at time of inference (i.e. production prediction)? You'll first want to know when you'll be making a prediction.
# 1. Do you know if a plane will arrive late prior to taking off?
#
#
# 2. In some regulated industries, some variables are illegal to use for predictive modeling.
# 1. For example, personally identifiable information (PII) is one such example.
#
#
# 3. How likely is the variable to be available in production?
# 1. Determine a threshold for how often you expect a variable to be populated at time of inference, and remove variables whose missing rate exceeds that threshold (see the sketch below).
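# A small, self-contained pandas sketch of point 3, using toy data: compute the share of
# missing values per column and drop columns above a chosen cut-off (the 30% threshold
# here is an arbitrary example, not a recommendation).
import numpy as np
import pandas as pd

_avail_toy = pd.DataFrame({"CRSDepTime": [905, 1130, 1300, 1415],
                           "DepTime": [900.0, np.nan, np.nan, np.nan],
                           "TailNum": ["N123", None, "N456", "N789"]})
_missing_rate = _avail_toy.isna().mean()            # fraction of missing values per column
_to_drop = _missing_rate[_missing_rate > 0.30].index.tolist()
print("Dropping columns:", _to_drop)                # -> ['DepTime']
_avail_toy = _avail_toy.drop(columns=_to_drop)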
# ## Supervised Learning Pipeline
# Here is a general end to end pipeline for a data science project.
#
# 1. Define Business Objective & Criteria for Success
# + Experimental Design
# + Identify the business/product objective
# + Identify & hypothesize goals and criteria for success
# + Create a set of questions for identifying correct data set
# + Define which machine learning evaluation metric will be used to quantify quality of predictions
# + Identify data sources, time window of data collected, data formats, data dictionary, features, target & evaluation metric
# 2. Data Aquisition
# + Define what/how much data we need, where it lives, what format it's in & load dataset
# + Import data from local or remote data source & determine the most appropriate tools to work with the data
# + Pandas has functions for common open source data formats including data base connectors for MySQL & PostgreSQL
# + Use Spark for Big Data
# + Gather/Read any documentation available for the data (schema, data dictionary)
# + Load and pre-process the data into a representation which is ready for model training
# + If the data is available in an open source data format (JSON, CSV, XML, EXCEL), you'll be able to leverage open source tools
# + If the data is available in a closed-source format (fixed-format rows) then you will need to develop a parser to format the data into appropriate columns
# + Ensure correct data types are inferred
# + Look at the values. Ensure they make sense in the context of each column
# + Look for missing/empty values
# + For categorical fields, what are the unique values in the field?
# + For numeric fields, are all values numbers?
# + Split-out validation dataset
# 3. Exploratory Data Analysis
# + Gather insights by using exploratory methods, descriptive & inferential statistics
# + Find median, mode, std dev, min, max, average for each column. Do these make sense in the context of the column?
# + Do financial values have reasonable upper bounds?
# + Univariate feature distributions (to observe stability & other patterns of a given feature like skew)
# + Feature & target correlations
# + Target analysis (plot of feature vs target)
# + Are there any outliers?
# + Do the column values seem to follow a normal distribution? Uniform? Exponential (i.e. long tail)? If exponential, taking log(X) may be beneficial for linear regression.
# 4. Feature Engineering
# + Perform feature scaling / normalization
# + Inject domain knowledge (structure) into the data by adding or modifying existing columns
# + Linear combinations of two or more features (ratios or other arithmetic variations)
# + Adding new columns for day of year, hour of day from a datetime column
# + Convert categorical data into numerical values using one-hot encoding
# 5. Feature Selection
# + Drop highly correlated features (see correlation section above)
# + PCA
# + Recursive Feature Elimination
# + Regularization method using LASSO
# 6. Select, build & evaluate the model
# + Establish a baseline model for comparison
# + Spot Check & Compare Algorithms
# + Run a spot check of single model performance & tune the top 3 best performing learners
# + Evaluate Algorithms with Standardization
# + Improve accuracy
# + You may generally find ensemble methods (such as bagging, boosting, and gradient boosting) to be quite useful
# 7. Refine the model (Hyper-parameter tuning)
# + Use GridSearch to search & tune hyper-parameters (a minimal sketch of steps 6-7 follows this list)
# 8. Finalize Model (use all training data and confirm using validation dataset)
# + Save model binary along with model training results
# + Predictions on validation dataset
# 9. Communicate the results
# + Summarize findings with narrative, storytelling techniques
# + Present limitations, assumptions of your analysis
# + Identify follow-up problems and questions for future analysis
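# A minimal sketch of steps 6-7 (baseline comparison followed by grid search). It runs on a
# small synthetic classification problem so it is independent of the airline data loaded
# below; the estimator and parameter grid are illustrative choices, not the ones used later
# in this notebook.
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV, train_test_split

_X_toy, _y_toy = make_classification(n_samples=500, n_features=10, random_state=7)
_X_tr, _X_te, _y_tr, _y_te = train_test_split(_X_toy, _y_toy, test_size=0.3, random_state=7)

# Step 6: establish a trivial baseline to beat.
_baseline = DummyClassifier(strategy="most_frequent").fit(_X_tr, _y_tr)
print("baseline accuracy:", accuracy_score(_y_te, _baseline.predict(_X_te)))

# Step 7: tune a simple model with GridSearchCV and compare against the baseline.
_grid = GridSearchCV(LogisticRegression(max_iter=1000),
                     param_grid={"C": [0.01, 0.1, 1.0, 10.0]}, cv=5)
_grid.fit(_X_tr, _y_tr)
print("tuned accuracy:", accuracy_score(_y_te, _grid.predict(_X_te)))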
# In[1]:
#load libraries
from __future__ import print_function
import math
import numpy as np
from IPython import display
print('numpy: {}'.format(np.__version__))
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import pandas as pd
print('pandas: {}'.format(pd.__version__))
import sklearn
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import Imputer
from sklearn.metrics import mean_absolute_error
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc, recall_score, precision_score
from sklearn import model_selection
from sklearn.metrics import average_precision_score
print('sklearn: {}'.format(sklearn.__version__))
import xgboost as xgb
from xgboost import plot_importance
print('xgboost: {}'.format(xgb.__version__))
import joblib
import pickle
pd.options.display.max_rows = 40
pd.options.display.float_format = '{:.1f}'.format
seed = 7
OUTPUT_DIR="../data/processed/"
# In[2]:
# load data
def load_data(location, data_format="csv"):
if(data_format=="csv"):
df = pd.read_csv(location, encoding="ISO-8859-1", low_memory=False)
df = df.reindex(
np.random.permutation(df.index))
else:
print("{} format not currently supported".format(data_format))
return df
airlines_df = load_data("https://s3.amazonaws.com/h2o-airlines-unpacked/allyears2k.csv")
# preview data
airlines_df.head()
# Observe columns available in the dataset...
# In[3]:
airlines_df.columns
# In[4]:
airlines_df.describe()
# In[5]:
# dataset size
airlines_df.shape
# #### Target Analysis
# Check if any instances don't contain a label...
# In[6]:
airlines_df["IsDepDelayed"].isnull().sum()
# In[7]:
airlines_df["IsDepDelayed"].value_counts()
# In[8]:
y = airlines_df.IsDepDelayed
y.head()
# In[9]:
cols_not_to_use = ["DepTime", "ArrTime", "TailNum", "ActualElapsedTime", "AirTime", "ArrDelay",
"DepDelay", "TaxiIn", "TaxiOut", "CancellationCode", "Diverted", "CarrierDelay",
"WeatherDelay", "NASDelay", "SecurityDelay", "LateAircraftDelay", "IsArrDelayed", "IsDepDelayed"]
cols_to_use = ["Year", "Month", "DayofMonth", "DayOfWeek", "CRSDepTime",
"CRSArrTime", "UniqueCarrier", "FlightNum", "CRSElapsedTime",
"Origin", "Dest", "Distance", "Cancelled"]
assert(len(cols_not_to_use) + len(cols_to_use) == airlines_df.shape[1])
def get_training_data(df, cols_not_to_use):
print("\nGet Training Data...")
print("Original shape: {}".format(df.shape))
df = df.drop(cols_not_to_use, axis=1, errors='ignore')
print("After columns dropped shape: {}".format(df.shape))
return df
def label_encode_target(df, target):
print("\nLabel Encode Target into Integers...")
# encode string class values as integers
y = df[target]
label_encoder = LabelEncoder()
label_encoder = label_encoder.fit(y)
label_encoded_y = label_encoder.transform(y)
return label_encoded_y
def naive_one_hot_encode(df, cols_to_encode=[]):
print("\nNaive One-Hot-Encode for features: {}".format(cols_to_encode))
print("\nTotal number of features before encoding: {}".format(df.shape[1]))
for col in cols_to_encode:
# use pd.concat to join the new columns with your original dataframe
df = pd.concat([df,pd.get_dummies(df[col], prefix=col+"_")],axis=1)
df = df.drop(col,axis=1)
print("\nTotal number of features after encoding: {}".format(df.shape[1]))
return df
# In[10]:
# this method call is not idempotent (can't delete target more than once)
label_encoded_y = label_encode_target(airlines_df, "IsDepDelayed")
X = get_training_data(airlines_df, cols_not_to_use)
assert(len(X.columns) == len(cols_to_use))
X = naive_one_hot_encode(X, ['UniqueCarrier','Dest','Origin'])
# train / test split
training_examples, test_examples, training_targets, test_targets = train_test_split(X, label_encoded_y, test_size=0.30)
# In[11]:
columns_of_interest = ["DayofMonth", "Year", "DayOfWeek", "Month", "Distance", "FlightNum", "Origin", "Dest", "UniqueCarrier"]
X = airlines_df[columns_of_interest]
# use pd.concat to join the new columns with your original dataframe
X = pd.concat([X,pd.get_dummies(X['UniqueCarrier'], prefix='carrier_')],axis=1)
X = pd.concat([X, pd.get_dummies(X['Dest'], prefix='dest_')], axis=1)
import pandas as pd
import numpy as np
from pathlib import Path
def load(path, dt=False, stats=False):
print("loading data from",path)
dataFrames = {}
dataFrames['gameLogs'] = pd.read_csv(path/'GameLogs.csv', index_col=False)
if dt:
dataFrames['gameLogs']['Date'] = pd.to_datetime(dataFrames['gameLogs']['Date'])
dataFrames['people'] = pd.read_csv(path/'People.csv', index_col=False)
dataFrames['teams'] = pd.read_csv(path/'Teams.csv', index_col=False)
dataFrames['managers'] = pd.read_csv(path/'Managers.csv', index_col=False)
dataFrames['fieldings'] = pd.read_csv(path/'Fielding.csv', index_col=False)
dataFrames['pitchings'] = pd.read_csv(path/'Pitching.csv', index_col=False)
dataFrames['battings'] = pd.read_csv(path/'Batting.csv', index_col=False)
if stats:
dataFrames['stats'] = pd.read_csv(path/'Stats.csv', index_col=False)
print("data loaded")
return dataFrames
def save(path, dataFrames, stats=False):
print("Saving data to",path)
dataFrames['gameLogs'].to_csv(path/'GameLogs.csv', index = False)
dataFrames['people'].to_csv(path/'People.csv', index = False)
dataFrames['teams'].to_csv(path/'Teams.csv', index = False)
dataFrames['managers'].to_csv(path/'Managers.csv', index = False)
dataFrames['fieldings'].to_csv(path/'Fielding.csv', index = False)
dataFrames['pitchings'].to_csv(path/'Pitching.csv', index = False)
dataFrames['battings'].to_csv(path/'Batting.csv', index = False)
if stats:
dataFrames['stats'].to_csv(path/'Stats.csv', index = False)
print("Data saved")
def filter(path, saveState=True):
def filterFrame(frame, columns, renames=None):
frame = frame[columns]
if(renames!=None):
frame = frame.rename(columns=renames)
return frame.reset_index(drop=True)
def filterGameLogs(gameLogs, people):
gameLogs['Date'] = pd.to_datetime(gameLogs['Date'], format="%Y%m%d")
gameLogs['Visiting league AL'] = gameLogs['Visiting league']=="AL"
gameLogs['Home league AL'] = gameLogs['Home league']=="AL"
gameLogs = gameLogs[gameLogs['Forfeit information'].isna()]
gameLogs = gameLogs[gameLogs['Protest information'].isna()]
generalColumns = [
'Date','Visiting: Team','Visiting league AL','Home: Team','Home league AL','Visiting: Score','Home: Score']
visitingStatsColumns = [
'Visiting at-bats','Visiting hits','Visiting doubles','Visiting triples','Visiting homeruns','Visiting RBI','Visiting sacrifice hits','Visiting sacrifice flies',
'Visiting hit-by-pitch','Visiting walks','Visiting intentional walks','Visiting strikeouts','Visiting stolen bases','Visiting caught stealing','Visiting grounded into double plays',
'Visiting left on base','Visiting pitchers used','Visiting individual earned runs','Visiting team earned runs','Visiting wild pitches',
'Visiting balks','Visiting putouts','Visiting assists','Visiting errors','Visiting passed balls','Visiting double plays','Visiting triple plays']
homeStatsColumns = [
'Home at-bats','Home hits','Home doubles','Home triples','Home homeruns','Home RBI','Home sacrifice hits','Home sacrifice flies',
'Home hit-by-pitch','Home walks','Home intentional walks','Home strikeouts','Home stolen bases','Home caught stealing','Home grounded into double plays',
'Home left on base','Home pitchers used','Home individual earned runs','Home team earned runs','Home wild pitches',
'Home balks','Home putouts','Home assists','Home errors','Home passed balls','Home double plays','Home triple plays']
visitingIDColumns = [
'Visiting team manager ID','Visiting starting pitcher ID',
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID']
homeIDColumns = [
'Home team manager ID','Home starting pitcher ID',
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
identifier = people[['playerID','retroID']].drop_duplicates(subset=['retroID']).dropna()
for column in visitingIDColumns+homeIDColumns:
            merged = pd.merge(gameLogs[column], identifier, left_on=column, right_on='retroID', how="left")
#!/usr/bin/env python
import logging
import os
import importlib
import sys
import pickle
import numpy as np
import pandas as pd
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.models import load_model
from scipy.stats import spearmanr
from keras.layers import Input
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import h2o
import feature_imp
import utils
import process_features
import models
# setting nvidia gpu environment
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
# Setting the correct config file
config_path = ".".join(["models", sys.argv[1]]) + "." if len(sys.argv) >= 2 else ""
config = importlib.import_module(config_path+"config")
attention_setting = importlib.import_module(config_path+"attention_setting")
# Setting up log file
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(name)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
fh = logging.FileHandler(config.run_specific_log, mode='a')
fh.setFormatter(fmt=formatter)
logger = logging.getLogger("Recurrent neural network")
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)
# output will be the average spearman correlation
def get_prediction_group(model, index_list, unique_list, output_data, test_data):
total_per = 0
prediction = []
num_of_unuse_index = 0
for test_index in index_list:
new_index = [i for i in test_index if i in unique_list]
cur_prediction = model.predict(x = [data[new_index, :] for data in test_data])
        prediction.extend(list(cur_prediction))
        # Skip groups with too few usable samples; a Spearman correlation over one or
        # two points is not meaningful.
        if len(new_index) <= 2:
            num_of_unuse_index += 1
            continue
        cur_per = spearmanr(cur_prediction, output_data[new_index, :])[0]
        total_per += cur_per
print(num_of_unuse_index)
return total_per / float(len(index_list)-num_of_unuse_index), pd.Series(prediction)
def get_prediction_regular(model, test_index, unique_list, output_data, test_data):
prediction = model.predict(x=[data[unique_list, :] for data in test_data])
#prediction = prediction.round(2)
performance = spearmanr(prediction, output_data[unique_list, :])[0]
return performance, pd.Series(list(prediction))
def ml_train(X, extra_crispr_df, y, train_index, test_index):
logger.debug("Creating h2o working env")
# ### Start H2O
    # Start up a 1-node H2O cloud on your local machine, and allow it to use all CPU cores and up to 6GB of memory (matching max_mem_size below):
h2o.init(max_mem_size="6G")
h2o.remove_all()
logger.debug("Created h2o working env successfully")
from h2o.estimators import H2ORandomForestEstimator
rf_crispr = H2ORandomForestEstimator(
model_id="rf_crispr",
categorical_encoding="enum",
nfolds=5,
ntrees=300,
# max_depth = 20,
# nbins = 20,
stopping_rounds=30,
score_each_iteration=True,
seed=100000)
'''
rf_crispr = H2OXGBoostEstimator(
model_id="rf_crispr",
categorical_encoding="enum",
nfolds=5,
ntrees=300,
stopping_rounds=30,
score_each_iteration=True,
seed=1000000)
'''
seq_data = X.iloc[:, :config.seq_len]
seq_data.columns = ['pos_' + str(i) for i in range(len(seq_data.columns))]
pre_h2o_df = pd.concat([seq_data, extra_crispr_df, y], axis=1)
h2o_crispr_df_train = h2o.H2OFrame(pre_h2o_df.loc[train_index, :])
h2o_crispr_df_test = h2o.H2OFrame(pre_h2o_df.loc[test_index, :])
logger.debug("Training machine learning model")
rf_crispr.train(x=h2o_crispr_df_train.col_names[:-1], y=h2o_crispr_df_train.col_names[-1],
training_frame=h2o_crispr_df_train)
logger.debug("Trained successfully. Output feature importance")
feature_importance = rf_crispr._model_json['output']['variable_importances'].as_data_frame()[
['variable', 'percentage']]
feature_importance.to_csv(config.feature_importance_path, index=False)
logger.debug("Predicting training data")
test_prediction_train = rf_crispr.predict(h2o_crispr_df_train[:-1])
performance = spearmanr(test_prediction_train.as_data_frame()['predict'], h2o_crispr_df_train.as_data_frame()['log2fc'])[0]
logger.debug("spearman correlation coefficient for training dataset is: %f" % performance)
logger.debug("Predicting test data")
test_prediction = rf_crispr.predict(h2o_crispr_df_test[:-1])
performance = spearmanr(test_prediction.as_data_frame()['predict'], h2o_crispr_df_test.as_data_frame()['log2fc'])[0]
logger.debug("spearman correlation coefficient for training dataset is: %f" % performance)
logger.debug("Saving model")
h2o.save_model(rf_crispr, config.ml_model_path)
logger.debug("Saved model to disk")
def run():
logger.debug("Reading in the crispr dataset %s" % config.input_dataset)
    crispr = pd.read_csv(config.input_dataset)
import streamlit as st
import pandas as pd
import numpy as np
import base64
import re
import plotly.graph_objects as go
import plotly.express as px
# import seaborn as sns
# import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_diabetes
# Functions ............................................................................................................
collect_numbers = lambda x: [float(i) for i in re.split(',+', x) if i != ""]
collect_numbers_int = lambda x: [int(i) for i in re.split(',+', x) if i != ""]
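# Example: collect_numbers("0.1, 1, 10") -> [0.1, 1.0, 10.0]
#          collect_numbers_int("10,50,100") -> [10, 50, 100]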
def filedownload(df):
"""
    Converts the dataframe df into a CSV file and returns an HTML link for downloading it.
:param df: dataframe containing max_feature, n_estimators, R^2.
"""
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode()).decode() # strings <-> bytes conversions
href = f'<a href="data:file/csv;base64,{b64}" download="model_performance.csv">Download CSV File</a>'
return href
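# Note: the CSV is embedded in the link as a base64 data URI, so the browser can download
# the table without the app writing any file to disk.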
def build_model_Adaboost_Regressor(df):
"""
    It builds a model using the AdaBoost regression algorithm.
    Takes input from the Streamlit web interface and uses those inputs for building the model.
    Uses GridSearchCV for hyperparameter tuning.
    Plots the results using the Plotly framework.
:param df: dataframe containing features and labels.
"""
from sklearn.ensemble import AdaBoostRegressor
all=False
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown('A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
adaboost = AdaBoostRegressor(loss= loss, random_state= random_state)
grid = GridSearchCV(estimator=adaboost, param_grid=param_grid, cv=5, n_jobs=n_jobs)
grid.fit(X_train, Y_train)
st.subheader('Model Performance')
Y_pred_test = grid.predict(X_test)
st.write('Coefficient of determination ($R^2$):')
st.info("%0.3f" %r2_score(Y_test, Y_pred_test))
if criterion == 'MSE':
st.write('Mean Squared Error (MSE):')
st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test))
if criterion == 'MAE':
st.write('Mean Absolute Error (MAE):')
st.info("%0.2f" %mean_absolute_error(Y_test, Y_pred_test))
if criterion == 'RMSE':
st.write('Root Mean Squared Error (RMSE):')
st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test, squared=False))
if criterion == 'All':
all = True
st.write('Mean Squared Error (MSE):')
mse = mean_squared_error(Y_test, Y_pred_test)
st.info("%0.2f" %mse)
st.write('Root Mean Squared Error (RMSE):')
rsme = mean_squared_error(Y_test, Y_pred_test, squared=False)
st.info("%0.2f" %rsme)
st.write('Mean Absolute Error (MAE):')
mae = mean_absolute_error(Y_test, Y_pred_test)
st.info("%0.2f" %mae)
st.write("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
st.subheader('Model Parameters')
st.write(grid.get_params())
# Grid Data .......
grid_results = pd.concat(
[pd.DataFrame(grid.cv_results_["params"]), pd.DataFrame(grid.cv_results_["mean_test_score"], columns=["R2"])],
axis=1)
# Segment data into groups based on the 2 hyperparameters
grid_contour = grid_results.groupby(['learning_rate', 'n_estimators']).mean()
# Pivoting the data
grid_reset = grid_contour.reset_index()
grid_reset.columns = ['learning_rate', 'n_estimators', 'R2']
grid_pivot = grid_reset.pivot('learning_rate', 'n_estimators')
x = grid_pivot.columns.levels[1].values
y = grid_pivot.index.values
z = grid_pivot.values
# -----Plot-----#
layout = go.Layout(
xaxis=go.layout.XAxis(
title=go.layout.xaxis.Title(
text='n_estimators')
),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(
text='Learning_rate')
))
fig = go.Figure(data=[go.Surface(z=z, y=y, x=x)], layout=layout)
fig.update_layout(title='Hyperparameter tuning',
scene=dict(
xaxis_title='n_estimators',
yaxis_title='Learning_Rate',
zaxis_title='R2'),
autosize=False,
width=800, height=800,
margin=dict(l=65, r=50, b=65, t=90))
st.plotly_chart(fig)
if all == True:
criteria = ['RMSE', 'MSE', 'MAE']
# colors = {'RMSE': 'red',
# 'MSE': 'orange',
# 'MAE': 'lightgreen'}
fig = go.Figure([go.Bar(x=criteria, y=[rsme, mse, mae])])
st.plotly_chart(fig)
# Change the bar mode
fig.update_layout(barmode='group')
# -----Save grid data-----#
x = pd.DataFrame(x)
y = pd.DataFrame(y)
z = pd.DataFrame(z)
df = pd.concat([x, y, z], axis=1)
st.markdown(filedownload(grid_results), unsafe_allow_html=True)
##################################################### Linear regression to be worked on
def build_model_Linear_Regressor(df):
"""
    It builds a model using the Linear Regression algorithm.
    Takes input from the Streamlit web interface and uses those inputs for building the model.
    Fits scikit-learn's LinearRegression on the selected independent variables (no hyperparameter tuning is needed).
    Plots the results using the Plotly framework.
:param df: dataframe containing features and labels.
"""
from sklearn.linear_model import LinearRegression
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown('A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
model = LinearRegression()
if len(ind_var) == 1:
dfx = X_train[ind_var[0]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
model.fit(dfx, Y_train)
Y_pred_test = model.predict(dfxtest)
fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
st.plotly_chart(fig)
if len(ind_var) == 2:
dfx = X_train[ind_var]
model.fit(dfx, Y_train)
dfxtest = X_test[ind_var]
mesh_size = .02
margin = 0
# Create a mesh grid on which we will run our model
x_min, x_max=X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
y_min, y_max=X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
xrange = np.arange(x_min, x_max, mesh_size)
yrange = np.arange(y_min, y_max, mesh_size)
xx, yy = np.meshgrid(xrange, yrange)
# Run model
pred = model.predict(np.c_[xx.ravel(), yy.ravel()])
pred = pred.reshape(xx.shape)
Y_pred_test = model.predict(dfxtest)
fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
fig.update_traces(marker=dict(size=5))
fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
st.plotly_chart(fig)
if len(ind_var) > 2:
dfx = X_train[ind_var]
model.fit(dfx, Y_train)
dfxtest = X_test[ind_var]
Y_pred_test = model.predict(dfxtest)
st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
c = len(ind_var)
for i in range(0,c):
dfx = X_train[ind_var[i]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[i]].values.reshape(-1, 1)
model.fit(dfx, Y_train)
pred = model.predict(dfxtest)
fig = px.scatter(df, x=ind_var[i], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[i]], y=pred, name='Regression Fit'))
st.plotly_chart(fig)
st.subheader('Model Performance')
st.write('Coefficient of determination ($R^2$):')
st.info("%0.3f" %r2_score(Y_test, Y_pred_test))
if criterion == 'MSE':
st.write('Mean Squared Error (MSE):')
st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test))
if criterion == 'MAE':
st.write('Mean Absolute Error (MAE):')
st.info("%0.2f" %mean_absolute_error(Y_test, Y_pred_test))
if criterion == 'RMSE':
st.write('Root Mean Squared Error (RMSE):')
st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test, squared=False))
if criterion == 'All':
st.write('Mean Squared Error (MSE):')
mse = mean_squared_error(Y_test, Y_pred_test)
st.info("%0.2f" %mse)
st.write('Root Mean Squared Error (RMSE):')
rsme = mean_squared_error(Y_test, Y_pred_test, squared=False)
st.info("%0.2f" %rsme)
st.write('Mean Absolute Error (MAE):')
mae = mean_absolute_error(Y_test, Y_pred_test)
st.info("%0.2f" %mae)
criteria = ['RMSE', 'MSE', 'MAE']
fig = go.Figure([go.Bar(x=criteria, y=[rsme, mse, mae])])
st.plotly_chart(fig)
# Change the bar mode
fig.update_layout(barmode='group')
##################################################Randomm Forest
def build_model_RandomForestRegressor(df):
"""
    It builds a model using the Random Forest regression algorithm.
    Takes input from the Streamlit web interface and uses those inputs for building the model.
    Uses GridSearchCV for hyperparameter tuning.
    Plots the results using the Plotly framework.
:param df: dataframe containing features and labels.
"""
from sklearn.ensemble import RandomForestRegressor
all=False
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown('A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
# X_train.shape, Y_train.shape
# X_test.shape, Y_test.shape
rf = RandomForestRegressor(n_estimators=n_estimators,
random_state=random_state,
max_features=max_features,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs)
grid = GridSearchCV(estimator=rf, param_grid=param_grid, cv=5)
grid.fit(X_train, Y_train)
st.subheader('Model Performance')
Y_pred_test = grid.predict(X_test)
st.write('Coefficient of determination ($R^2$):')
st.info("%0.3f" %r2_score(Y_test, Y_pred_test))
if criterion == 'MSE':
st.write('Mean Squared Error (MSE):')
st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test))
if criterion == 'MAE':
st.write('Mean Absolute Error (MAE):')
st.info("%0.2f" %mean_absolute_error(Y_test, Y_pred_test))
if criterion == 'RMSE':
st.write('Root Mean Squared Error (RMSE):')
st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test, squared=False))
if criterion == 'All':
all = True
st.write('Mean Squared Error (MSE):')
mse = mean_squared_error(Y_test, Y_pred_test)
st.info("%0.2f" %mse)
st.write('Root Mean Squared Error (RMSE):')
rmse = mean_squared_error(Y_test, Y_pred_test, squared=False)
st.info("%0.2f" %rmse)
st.write('Mean Absolute Error (MAE):')
mae = mean_absolute_error(Y_test, Y_pred_test)
st.info("%0.2f" %mae)
st.write("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
st.subheader('Model Parameters')
st.write(grid.get_params())
# Grid Data .......
grid_results = pd.concat([pd.DataFrame(grid.cv_results_["params"]), pd.DataFrame(grid.cv_results_["mean_test_score"], columns=["R2"])], axis=1)
# Segment data into groups based on the 2 hyperparameters
grid_contour = grid_results.groupby(['max_features', 'n_estimators']).mean()
# Pivoting the data
grid_reset = grid_contour.reset_index()
grid_reset.columns = ['max_features', 'n_estimators', 'R2']
grid_pivot = grid_reset.pivot('max_features', 'n_estimators')
x = grid_pivot.columns.levels[1].values
y = grid_pivot.index.values
z = grid_pivot.values
# -----Plot-----#
layout = go.Layout(
xaxis=go.layout.XAxis(
title=go.layout.xaxis.Title(
text='n_estimators')
),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(
text='max_features')
))
fig = go.Figure(data=[go.Surface(z=z, y=y, x=x)], layout=layout)
fig.update_layout(title='Hyperparameter tuning (Surface Plot)',
scene=dict(
xaxis_title='n_estimators',
yaxis_title='max_features',
zaxis_title='R2'),
autosize=False,
width=800, height=800,
margin=dict(l=65, r=50, b=65, t=90))
st.plotly_chart(fig)
if all == True:
criteria = ['RMSE', 'MSE', 'MAE']
fig = go.Figure([go.Bar(x=criteria, y=[rmse, mse, mae])])
st.plotly_chart(fig)
# -----Save grid data-----#
x = pd.DataFrame(x)
y = pd.DataFrame(y)
z = pd.DataFrame(z)
df = pd.concat([x, y, z], axis=1)
st.markdown(filedownload(grid_results), unsafe_allow_html=True)
################################################## SVR
def build_model_SVR(df):
"""
    It builds a model using the Support Vector Regression (SVR) algorithm.
    Takes input from the Streamlit web interface and uses those inputs for building the model.
    Uses GridSearchCV for hyperparameter tuning.
    Plots the results using the Plotly framework.
:param df: dataframe containing features and labels.
"""
from sklearn.svm import SVR
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown('A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
model = SVR()
if len(ind_var) == 1:
dfx = X_train[ind_var[0]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
clf = GridSearchCV(model, param_grid)
clf.fit(dfx,Y_train)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
st.plotly_chart(fig)
if len(ind_var) == 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
mesh_size = .02
margin = 0
# Create a mesh grid on which we will run our model
x_min, x_max = X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
y_min, y_max = X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
xrange = np.arange(x_min, x_max, mesh_size)
yrange = np.arange(y_min, y_max, mesh_size)
xx, yy = np.meshgrid(xrange, yrange)
# Run model
pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
pred = pred.reshape(xx.shape)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
fig.update_traces(marker=dict(size=3))
fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
st.plotly_chart(fig)
if len(ind_var) > 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
c = len(ind_var)
clf1 = GridSearchCV(model, param_grid)
for i in range(0,c):
dfx = X_train[ind_var[i]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[i]].values.reshape(-1, 1)
clf1.fit(dfx, Y_train)
pred = clf1.predict(dfxtest)
fig = px.scatter(df, x=ind_var[i], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[i]], y=pred, name='Regression Fit'))
st.plotly_chart(fig)
st.write("The best parameters are %s with a score of %0.2f"
% (clf.best_params_, clf.best_score_))
st.subheader('Model Parameters')
st.write(clf.get_params())
st.subheader('Model Performance')
st.write('Coefficient of determination ($R^2$):')
st.info("%0.3f" %r2_score(Y_test, Y_pred_test))
if criterion == 'MSE':
st.write('Mean Squared Error (MSE):')
st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test))
if criterion == 'MAE':
st.write('Mean Absolute Error (MAE):')
st.info("%0.2f" %mean_absolute_error(Y_test, Y_pred_test))
if criterion == 'RMSE':
st.write('Root Mean Squared Error (RMSE):')
st.info("%0.2f" %mean_squared_error(Y_test, Y_pred_test, squared=False))
if criterion == 'All':
st.write('Mean Squared Error (MSE):')
mse = mean_squared_error(Y_test, Y_pred_test)
st.info("%0.2f" %mse)
st.write('Root Mean Squared Error (RMSE):')
rsme = mean_squared_error(Y_test, Y_pred_test, squared=False)
st.info("%0.2f" %rsme)
st.write('Mean Absolute Error (MAE):')
mae = mean_absolute_error(Y_test, Y_pred_test)
st.info("%0.2f" %mae)
criteria = ['RMSE', 'MSE', 'MAE']
fig = go.Figure([go.Bar(x=criteria, y=[rsme, mse, mae])])
st.plotly_chart(fig)
# st.subheader("Hyperparameter Tuning Results")
# df_gridsearch = pd.DataFrame(clf.cv_results_)
# dfViz = df_gridsearch[['param_C', 'param_gamma', 'mean_test_score']]
#
# pivot = pd.pivot_table(data=dfViz, index=['param_C'], columns=['param_gamma'], values=['mean_test_score'])
# sns.heatmap(pivot, annot=True)
# st.pyplot(plt)
# Change the bar mode
fig.update_layout(barmode='group')
################################################## SGD
def build_model_SGD(df):
"""
    It builds a model using the Stochastic Gradient Descent (SGD) regression algorithm.
    Takes input from the Streamlit web interface and uses those inputs for building the model.
    Uses GridSearchCV for hyperparameter tuning.
    Plots the results using the Plotly framework.
:param df: dataframe containing features and labels.
"""
from sklearn.linear_model import SGDRegressor
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown('A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
if scale == 'True':
from sklearn.preprocessing import StandardScaler
cols = X_train.columns
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=cols)
X_test = pd.DataFrame(X_test, columns=cols)
model = SGDRegressor()
if len(ind_var) == 1:
dfx = X_train[ind_var[0]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
st.plotly_chart(fig)
if len(ind_var) == 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
mesh_size = .02
margin = 0
# Create a mesh grid on which we will run our model
x_min, x_max=X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
y_min, y_max=X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
xrange = np.arange(x_min, x_max, mesh_size)
yrange = np.arange(y_min, y_max, mesh_size)
xx, yy = np.meshgrid(xrange, yrange)
# Run model
pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
pred = pred.reshape(xx.shape)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
fig.update_traces(marker=dict(size=3))
fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
st.plotly_chart(fig)
if len(ind_var) > 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
c = len(ind_var)
clf1 = GridSearchCV(model, param_grid)
for i in range(0, c):
dfx = X_train[ind_var[i]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[i]].values.reshape(-1, 1)
clf1.fit(dfx, Y_train)
pred = clf1.predict(dfxtest)
fig = px.scatter(df, x=ind_var[i], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[i]], y=pred, name='Regression Fit'))
st.plotly_chart(fig)
st.write("The best parameters are %s with a score of %0.2f"
% (clf.best_params_, clf.best_score_))
st.subheader('Model Parameters')
st.write(clf.get_params())
st.subheader('Model Performance')
st.write('Coefficient of determination ($R^2$):')
st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
if criterion == 'MSE':
st.write('Mean Squared Error (MSE):')
st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
if criterion == 'MAE':
st.write('Mean Absolute Error (MAE):')
st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
if criterion == 'RMSE':
st.write('Root Mean Squared Error (RMSE):')
st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
if criterion == 'All':
st.write('Mean Squared Error (MSE):')
mse = mean_squared_error(Y_test, Y_pred_test)
st.info("%0.2f" % mse)
st.write('Root Mean Squared Error (RMSE):')
rsme = mean_squared_error(Y_test, Y_pred_test, squared=False)
st.info("%0.2f" % rsme)
st.write('Mean Absolute Error (MAE):')
mae = mean_absolute_error(Y_test, Y_pred_test)
st.info("%0.2f" % mae)
criteria = ['RMSE', 'MSE', 'MAE']
fig = go.Figure([go.Bar(x=criteria, y=[rsme, mse, mae])])
st.plotly_chart(fig)
# Change the bar mode
fig.update_layout(barmode='group')
################################################### Kernel Ridge
def build_model_KernelRidge(df):
"""
    It builds a model using the Kernel Ridge regression algorithm.
    Takes input from the Streamlit web interface and uses those inputs for building the model.
    Uses GridSearchCV for hyperparameter tuning.
    Plots the results using the Plotly framework.
:param df: dataframe containing features and labels.
"""
from sklearn.kernel_ridge import KernelRidge
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown('A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
if scale == 'True':
from sklearn.preprocessing import StandardScaler
cols = X_train.columns
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=cols)
X_test = pd.DataFrame(X_test, columns=cols)
model = KernelRidge()
if len(ind_var) == 1:
dfx = X_train[ind_var[0]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
st.plotly_chart(fig)
if len(ind_var) == 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
mesh_size = .02
margin = 0
# Create a mesh grid on which we will run our model
x_min, x_max=X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
y_min, y_max=X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
xrange = np.arange(x_min, x_max, mesh_size)
yrange = np.arange(y_min, y_max, mesh_size)
xx, yy = np.meshgrid(xrange, yrange)
# Run model
pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
pred = pred.reshape(xx.shape)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
fig.update_traces(marker=dict(size=3))
fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
st.plotly_chart(fig)
if len(ind_var) > 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
c = len(ind_var)
clf1 = GridSearchCV(model, param_grid)
for i in range(0, c):
dfx = X_train[ind_var[i]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[i]].values.reshape(-1, 1)
clf1.fit(dfx, Y_train)
pred = clf1.predict(dfxtest)
fig = px.scatter(df, x=ind_var[i], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[i]], y=pred, name='Regression Fit'))
st.plotly_chart(fig)
st.write("The best parameters are %s with a score of %0.2f"
% (clf.best_params_, clf.best_score_))
st.subheader('Model Parameters')
st.write(clf.get_params())
st.subheader('Model Performance')
st.write('Coefficient of determination ($R^2$):')
st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
if criterion == 'MSE':
st.write('Mean Squared Error (MSE):')
st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
if criterion == 'MAE':
st.write('Mean Absolute Error (MAE):')
st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
if criterion == 'RMSE':
st.write('Root Mean Squared Error (RMSE):')
st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
if criterion == 'All':
st.write('Mean Squared Error (MSE):')
mse = mean_squared_error(Y_test, Y_pred_test)
st.info("%0.2f" % mse)
st.write('Root Mean Squared Error (RMSE):')
rsme = mean_squared_error(Y_test, Y_pred_test, squared=False)
st.info("%0.2f" % rsme)
st.write('Mean Absolute Error (MAE):')
mae = mean_absolute_error(Y_test, Y_pred_test)
st.info("%0.2f" % mae)
criteria = ['RMSE', 'MSE', 'MAE']
fig = go.Figure([go.Bar(x=criteria, y=[rsme, mse, mae])])
st.plotly_chart(fig)
# Change the bar mode
fig.update_layout(barmode='group')
################################################ Elastic Net
def build_model_ElasticNet(df):
"""
    It builds a model using the Elastic Net regression algorithm.
    Takes input from the Streamlit web interface and uses those inputs for building the model.
    Uses GridSearchCV for hyperparameter tuning.
    Plots the results using the Plotly framework.
:param df: dataframe containing features and labels.
"""
from sklearn.linear_model import ElasticNet
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown('A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
if scale == 'True':
from sklearn.preprocessing import StandardScaler
cols = X_train.columns
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=cols)
X_test = pd.DataFrame(X_test, columns=cols)
model = ElasticNet()
if len(ind_var) == 1:
dfx = X_train[ind_var[0]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
st.plotly_chart(fig)
if len(ind_var) == 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
mesh_size = .02
margin = 0
# Create a mesh grid on which we will run our model
x_min, x_max=X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
y_min, y_max=X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
xrange = np.arange(x_min, x_max, mesh_size)
yrange = np.arange(y_min, y_max, mesh_size)
xx, yy = np.meshgrid(xrange, yrange)
# Run model
pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
pred = pred.reshape(xx.shape)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
fig.update_traces(marker=dict(size=3))
fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
st.plotly_chart(fig)
if len(ind_var) > 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
c = len(ind_var)
clf1 = GridSearchCV(model, param_grid)
for i in range(0, c):
dfx = X_train[ind_var[i]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[i]].values.reshape(-1, 1)
clf1.fit(dfx, Y_train)
pred = clf1.predict(dfxtest)
fig = px.scatter(df, x=ind_var[i], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[i]], y=pred, name='Regression Fit'))
st.plotly_chart(fig)
st.write("The best parameters are %s with a score of %0.2f"
% (clf.best_params_, clf.best_score_))
st.subheader('Model Parameters')
st.write(clf.get_params())
st.subheader('Model Performance')
st.write('Coefficient of determination ($R^2$):')
st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
if criterion == 'MSE':
st.write('Mean Squared Error (MSE):')
st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
if criterion == 'MAE':
st.write('Mean Absolute Error (MAE):')
st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
if criterion == 'RMSE':
st.write('Root Mean Squared Error (RMSE):')
st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
if criterion == 'All':
st.write('Mean Squared Error (MSE):')
mse = mean_squared_error(Y_test, Y_pred_test)
st.info("%0.2f" % mse)
st.write('Root Mean Squared Error (RMSE):')
rsme = mean_squared_error(Y_test, Y_pred_test, squared=False)
st.info("%0.2f" % rsme)
st.write('Mean Absolute Error (MAE):')
mae = mean_absolute_error(Y_test, Y_pred_test)
st.info("%0.2f" % mae)
criteria = ['RMSE', 'MSE', 'MAE']
fig = go.Figure([go.Bar(x=criteria, y=[rsme, mse, mae])])
st.plotly_chart(fig)
# Change the bar mode
fig.update_layout(barmode='group')
################################################# Gradient boosting
def build_model_GradientBoosting(df):
"""
    It builds a model using the Gradient Boosting regression algorithm.
    Takes input from the Streamlit web interface and uses those inputs for building the model.
    Uses GridSearchCV for hyperparameter tuning.
    Plots the results using the Plotly framework.
:param df: dataframe containing features and labels.
"""
from sklearn.ensemble import GradientBoostingRegressor
X = df.iloc[:, :-1] # Using all column except for the last column as X
Y = df.iloc[:, -1] # Selecting the last column as Y
st.markdown('A model is being built to predict the following **Y** variable:')
st.info(Y.name)
# Data splitting
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=split_size)
if scale == 'True':
from sklearn.preprocessing import StandardScaler
cols = X_train.columns
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=cols)
X_test = pd.DataFrame(X_test, columns=cols)
model = GradientBoostingRegressor()
if len(ind_var) == 1:
dfx = X_train[ind_var[0]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[0]].values.reshape(-1, 1)
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter(df, x=ind_var[0], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[0]], y=Y_pred_test, name='Regression Fit'))
st.plotly_chart(fig)
if len(ind_var) == 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
mesh_size = .02
margin = 0
# Create a mesh grid on which we will run our model
x_min, x_max=X_test[ind_var[0]].min() - margin, X_test[ind_var[0]].max() + margin
y_min, y_max=X_test[ind_var[1]].min() - margin, X_test[ind_var[1]].max() + margin
xrange = np.arange(x_min, x_max, mesh_size)
yrange = np.arange(y_min, y_max, mesh_size)
xx, yy = np.meshgrid(xrange, yrange)
# Run model
pred = clf.predict(np.c_[xx.ravel(), yy.ravel()])
pred = pred.reshape(xx.shape)
Y_pred_test = clf.predict(dfxtest)
fig = px.scatter_3d(df, x=ind_var[0], y=ind_var[1], z=Y_test.name)
fig.update_traces(marker=dict(size=3))
fig.add_traces(go.Surface(x=xrange, y=yrange, z=pred, name='pred_surface'))
st.plotly_chart(fig)
if len(ind_var) > 2:
dfx = X_train[ind_var]
dfxtest = X_test[ind_var]
clf = GridSearchCV(model, param_grid)
clf.fit(dfx, Y_train)
Y_pred_test = clf.predict(dfxtest)
st.subheader(f"Visualization shows how {Y_test.name} is dependent on individual variable")
c = len(ind_var)
clf1 = GridSearchCV(model, param_grid)
for i in range(0, c):
dfx = X_train[ind_var[i]].values.reshape(-1, 1)
dfxtest = X_test[ind_var[i]].values.reshape(-1, 1)
clf1.fit(dfx, Y_train)
pred = clf1.predict(dfxtest)
fig = px.scatter(df, x=ind_var[i], y=Y_test.name, opacity=0.65)
fig.add_traces(go.Scatter(x=X_test[ind_var[i]], y=pred, name='Regression Fit'))
st.plotly_chart(fig)
st.write("The best parameters are %s with a score of %0.2f"
% (clf.best_params_, clf.best_score_))
st.subheader('Model Parameters')
st.write(clf.get_params())
st.subheader('Model Performance')
st.write('Coefficient of determination ($R^2$):')
st.info("%0.3f" % r2_score(Y_test, Y_pred_test))
if criterion == 'MSE':
st.write('Mean Squared Error (MSE):')
st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test))
if criterion == 'MAE':
st.write('Mean Absolute Error (MAE):')
st.info("%0.2f" % mean_absolute_error(Y_test, Y_pred_test))
if criterion == 'RMSE':
st.write('Root Mean Squared Error (RMSE):')
st.info("%0.2f" % mean_squared_error(Y_test, Y_pred_test, squared=False))
if criterion == 'All':
st.write('Mean Squared Error (MSE):')
mse = mean_squared_error(Y_test, Y_pred_test)
st.info("%0.2f" % mse)
st.write('Root Mean Squared Error (RMSE):')
rsme = mean_squared_error(Y_test, Y_pred_test, squared=False)
st.info("%0.2f" % rsme)
st.write('Mean Absolute Error (MAE):')
mae = mean_absolute_error(Y_test, Y_pred_test)
st.info("%0.2f" % mae)
criteria = ['RMSE', 'MSE', 'MAE']
fig = go.Figure([go.Bar(x=criteria, y=[rsme, mse, mae])])
st.plotly_chart(fig)
# Change the bar mode
fig.update_layout(barmode='group')
# Page Layout ( Streamlit web Interface )
st.set_page_config(page_title="Regression Model Builder")
st.write("""
# Regression Model Builder
""")
# Sidebar ..............................................
# Sidebar - Collects user input features into dataframe
st.sidebar.header('Upload your CSV data')
uploaded_file = st.sidebar.file_uploader("Upload your input CSV file", type=["csv"])
st.sidebar.header("Parameter Configuration")
split_ratio = st.sidebar.slider('Data Split Ratio (% for Training Set)', 10, 90, 80, 5)
# train_test_split expects a fraction for test_size, so convert the training percentage
# into the held-out test fraction used by every build_model_* function below.
split_size = (100 - split_ratio) / 100
st.sidebar.header("Select Regressor")
reg = st.sidebar.selectbox("Choose Regression Algorithm", options=['Linear Regression', 'SVR',
'Random Forest Regression', 'Adaboost', 'SGD Regression', 'Kernel Ridge Regression',
'ElasticNet Regression', 'Gradient Boosting Regression'])
if reg == 'Random Forest Regression':
st.sidebar.subheader('Learning Parameters')
n_estimators = st.sidebar.slider('Number of estimators (n_estimators)', 0, 500, (10, 50), 50)
n_estimators_step = st.sidebar.number_input('Step size for n_estimators (n_estimators_step)', 10)
st.sidebar.write('---')
max_features = st.sidebar.slider('Max features', 1, 50, (1, 3), 1)
max_features_step = st.sidebar.number_input('Step Size for max Features', 1)
st.sidebar.write('---')
min_samples_split = st.sidebar.slider(
'Minimum number of samples required to split an internal node (min_samples_split)', 1, 10, 2, 1)
min_samples_leaf = st.sidebar.slider('Minimum number of samples required to be at a leaf node (min_samples_leaf)',
1, 10, 2, 1)
st.sidebar.subheader('General Parameters')
random_state = st.sidebar.slider('Seed number (random_state)', 0, 1000, 42, 1)
criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
bootstrap = st.sidebar.selectbox('Bootstrap samples when building trees (bootstrap)', options=[True, False])
oob_score = st.sidebar.selectbox('Whether to use out-of-bag samples to estimate the R^2 on unseen data (oob_score)',
options=[False, True])
n_jobs = st.sidebar.select_slider('Number of jobs to run in parallel (n_jobs)', options=[1, -1])
n_estimators_range = np.arange(n_estimators[0], n_estimators[1] + n_estimators_step, n_estimators_step)
max_features_range = np.arange(max_features[0], max_features[1] + max_features_step, max_features_step)
param_grid = dict(max_features=max_features_range, n_estimators=n_estimators_range)
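# Sketch of the search space with the default widget values (an assumption; users can change them):
#   n_estimators_range -> [10, 20, 30, 40, 50]   (np.arange over the (10, 50) slider, step 10)
#   max_features_range -> [1, 2, 3]              (np.arange over the (1, 3) slider, step 1)
# GridSearchCV later fits one candidate per (n_estimators, max_features) pair via cross-validation
# and exposes the winner through clf.best_params_ / clf.best_score_.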
if reg == 'Adaboost':
st.sidebar.subheader('Learning Parameters')
n_estimators = st.sidebar.slider('Number of estimators (n_estimators)', 0, 500, (10, 50), 50)
n_estimators_step = st.sidebar.number_input('Step size for n_estimators (n_estimators_step)', 10)
st.sidebar.write('---')
criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
lr = [0.0001, 0.001, 0.01, 0.1]
learning_rate = st.sidebar.select_slider('Range of Learning Rate (learning_rate)',
options=[0.0001, 0.001, 0.01, 0.1], value=(0.0001, 0.01))
l = lr.index(learning_rate[0])
r = lr.index(learning_rate[1])
learning_rate_range = lr[l:r + 1]
st.sidebar.write('---')
st.sidebar.header("Loss")
loss = st.sidebar.selectbox("Choose Loss",options=['linear', 'square', 'exponential'])
st.sidebar.subheader('General Parameters')
random_state = st.sidebar.slider('Seed number (random_state)', 0, 1000, 42, 1)
n_jobs = st.sidebar.select_slider('Number of jobs to run in parallel (n_jobs)', options=[1, -1])
n_estimators_range = np.arange(n_estimators[0], n_estimators[1] + n_estimators_step, n_estimators_step)
param_grid = dict(learning_rate = learning_rate_range, n_estimators=n_estimators_range)
if reg == 'Linear Regression':
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
else:
diabetes = load_diabetes()
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
Y = pd.Series(diabetes.target, name='response')
df = pd.concat([X, Y], axis=1)
st.sidebar.subheader('Variable Configuration')
ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
st.sidebar.write('---')
criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
if reg == 'SVR':
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
else:
diabetes = load_diabetes()
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
Y = pd.Series(diabetes.target, name='response')
df = pd.concat([X, Y], axis=1)
st.sidebar.subheader('Variable Configuration')
ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
st.sidebar.write('---')
criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
st.sidebar.subheader("Hyperparameters for SVR")
st.sidebar.subheader("Kernel")
kernel = st.sidebar.selectbox("Enter from the options", options=['All', 'linear', 'rbf', 'poly'])
numbers = st.sidebar.text_input("Enter values for 'c'. (Separate values with ,)")
C = collect_numbers(numbers)
numbers = st.sidebar.text_input("Enter values for 'gamma'. (Separate values with ,)")
gamma = collect_numbers(numbers)
numbers = st.sidebar.text_input("Enter values for 'epsilon'. (Separate values with ,)")
epsilon = collect_numbers(numbers)
if kernel == 'All':
kernel = ['linear', 'rbf', 'poly']
else:
kernel = [kernel]
param_grid = dict(kernel = kernel, gamma = gamma, epsilon = epsilon, C = C)
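# Example of the expected free-text inputs (illustrative values only; collect_numbers is assumed
# to split the comma-separated string and cast each token to float):
#   C       -> "0.1, 1, 10"   becomes [0.1, 1.0, 10.0]
#   gamma   -> "0.01, 0.1"    becomes [0.01, 0.1]
#   epsilon -> "0.1, 0.2"     becomes [0.1, 0.2]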
if reg == 'SGD Regression':
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
else:
diabetes = load_diabetes()
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
Y = pd.Series(diabetes.target, name='response')
df = pd.concat([X, Y], axis=1)
st.sidebar.subheader('Variable Configuration')
ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
st.sidebar.write('---')
criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
st.sidebar.subheader("Standard Scaling")
scale = st.sidebar.selectbox("Scale the data to be between -1 to 1", options=['True', 'False'])
st.sidebar.subheader("Hyperparameters for SGD Regressor")
numbers = st.sidebar.text_input("Enter values for 'alpha'. (Separate values with ,)")
alpha = collect_numbers(numbers)
loss = st.sidebar.selectbox("Loss", options=['All', 'squared_loss', 'huber', 'epsilon_insensitive'])
penalty = st.sidebar.selectbox("Penalty", options=['All', 'l2', 'l1', 'elasticnet'])
learning_rate = st.sidebar.selectbox("Learning Rate", options=['All', 'constant', 'optimal', 'invscaling'])
if loss == 'All':
loss = ['squared_loss', 'huber', 'epsilon_insensitive']
else:
loss = [loss]
if penalty == 'All':
penalty = ['l2', 'l1', 'elasticnet']
else:
penalty = [penalty]
if learning_rate == 'All':
learning_rate = ['constant', 'optimal', 'invscaling']
else:
learning_rate = [learning_rate]
param_grid = dict(alpha = alpha, loss = loss, penalty = penalty, learning_rate = learning_rate)
if reg == 'Kernel Ridge Regression':
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
else:
diabetes = load_diabetes()
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
Y = pd.Series(diabetes.target, name='response')
df = pd.concat([X, Y], axis=1)
st.sidebar.subheader('Variable Configuration')
ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
st.sidebar.subheader("Standard Scaling")
scale = st.sidebar.selectbox("Scale the data to be between -1 to 1", options=['True', 'False'])
st.sidebar.write('---')
criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
st.sidebar.write('---')
st.sidebar.subheader("Hyperparameters for Kernel Ridge Regression")
st.sidebar.subheader("Kernel")
kernel = st.sidebar.selectbox("Enter from the options", options=['All', 'linear', 'rbf', 'poly'])
numbers = st.sidebar.text_input("Enter values for 'alpha'. (Separate values with ,)")
alpha = collect_numbers(numbers)
numbers = st.sidebar.text_input("Enter values for 'gamma'. (Separate values with ,)")
gamma = collect_numbers(numbers)
if kernel == 'All':
kernel = ['linear', 'rbf', 'poly']
else:
kernel = [kernel]
param_grid = dict(kernel = kernel, gamma = gamma, alpha = alpha)
if reg == 'ElasticNet Regression':
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
else:
diabetes = load_diabetes()
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
Y = pd.Series(diabetes.target, name='response')
df = pd.concat([X, Y], axis=1)
st.sidebar.subheader('Variable Configuration')
ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
st.sidebar.subheader("Standard Scaling")
scale = st.sidebar.selectbox("Scale the data to be between -1 to 1", options=['True', 'False'])
st.sidebar.write('---')
criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
st.sidebar.write('---')
st.sidebar.subheader("Hyperparameters for ElasticNet Regression")
st.sidebar.subheader("Selection")
selection = st.sidebar.selectbox("Enter from the options", options=['All', 'cyclic', 'random'])
numbers = st.sidebar.text_input("Enter values for 'alpha'. (Separate values with ,)", value='1.0')
alpha = collect_numbers(numbers)
numbers = st.sidebar.text_input("Enter values for 'l1_ratio'. (Separate values with ,)", value='0.5')
l1_ratio = collect_numbers(numbers)
fit_intercept = st.sidebar.selectbox("Whether the intercept should be estimated or not", options=['Both', 'True', 'False'])
# if fit_intercept == 'Both' or fit_intercept == 'True':
# normalize = st.sidebar.selectbox("Regressors X will be normalized before regression by subtracting the mean and dividing by the l2-norm",
# options=['Both', 'True', 'False'])
# if normalize == 'Both':
# normalize = ['False', 'True']
# else:
# normalize = [normalize]
if selection == 'All':
selection = ['cyclic', 'random']
else:
selection = [selection]
if fit_intercept == 'Both':
fit_intercept = [False, True]
else:
fit_intercept = [fit_intercept == 'True']
# if fit_intercept.__contains__('True'):
# param_grid = dict(selection = selection, l1_ratio = l1_ratio, alpha = alpha,
# fit_intercept = fit_intercept, normalize = normalize)
# else:
param_grid = dict(selection=selection, l1_ratio=l1_ratio, alpha=alpha,
fit_intercept=fit_intercept)
if reg == 'Gradient Boosting Regression':
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
else:
diabetes = load_diabetes()
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
Y = pd.Series(diabetes.target, name='response')
df = pd.concat([X, Y], axis=1)
st.sidebar.subheader('Variable Configuration')
ind_var = st.sidebar.multiselect('Choose Independent Variables', options=df.columns)
st.sidebar.subheader("Standard Scaling")
scale = st.sidebar.selectbox("Scale the data to be between -1 to 1", options=['True', 'False'])
st.sidebar.write('---')
criterion = st.sidebar.selectbox('Performance measure (criterion)', options=['All', 'MSE', 'MAE', 'RMSE'])
st.sidebar.write('---')
st.sidebar.header("Hyperparameters for Gradient Boosting Regression")
st.sidebar.subheader("Loss")
loss = st.sidebar.selectbox("Enter from the options", options=['All', 'squared_error', 'absolute_error', 'huber',
'quantile'])
st.sidebar.subheader("Learning Rate")
numbers = st.sidebar.text_input("Enter values for 'learning rate'. (Separate values with ,)", value='0.1')
learning_rate = collect_numbers(numbers)
numbers = st.sidebar.text_input("Enter number of estimators. (Separate values with ,)", value='100')
n_estimators = collect_numbers_int(numbers)
numbers = st.sidebar.text_input("Enter values for 'Subsample'. (Separate values with ,)", value='1.0')
subsample = collect_numbers(numbers)
numbers = st.sidebar.text_input("Enter minimum sample Split. (Separate values with ,)", value='2')
min_samples_split = collect_numbers_int(numbers)
numbers = st.sidebar.text_input("Enter minimum samples leaf. (Separate values with ,)", value='1')
min_samples_leaf = collect_numbers_int(numbers)
numbers = st.sidebar.text_input("Enter maximum depth. (Separate values with ,)", value='3')
max_depth = collect_numbers_int(numbers)
max_features = st.sidebar.selectbox("Maximum Features", options=['All', 'auto', 'sqrt', 'log2'])
if loss == 'All':
loss = ['squared_error', 'absolute_error', 'huber', 'quantile']
else:
loss = [loss]
if max_features == 'All':
max_features = ['auto', 'sqrt', 'log2']
else:
max_features = [max_features]
param_grid = dict(loss=loss, learning_rate=learning_rate, n_estimators=n_estimators, subsample=subsample,
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
max_depth=max_depth, max_features=max_features)
# main Body ...............................................................................................
st.subheader('Dataset')
if uploaded_file is not None:
df = pd.read_csv(uploaded_file)
st.write(df)
if reg == 'Random Forest Regression':
build_model_RandomForestRegressor(df)
if reg == 'Adaboost':
build_model_Adaboost_Regressor(df)
if reg == 'Linear Regression':
build_model_Linear_Regressor(df)
if reg == 'SVR':
build_model_SVR(df)
if reg == 'SGD Regression':
build_model_SGD(df)
if reg == 'Kernel Ridge Regression':
build_model_KernelRidge(df)
if reg == 'ElasticNet Regression':
build_model_ElasticNet(df)
if reg == 'Gradient Boosting Regression':
build_model_GradientBoosting(df)
else:
st.info('Awaiting CSV file to be uploaded.')
if st.button('Press to use Example Dataset'):
diabetes = load_diabetes()
X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
Y = pd.Series(diabetes.target, name='response')
import joblib
from ..config import config
from .. import models
import fasttext
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MultiLabelBinarizer
from keras import backend as K
from pathlib import Path
import logging
_logger = logging.getLogger(__name__)
def read_corpus_file(corpus_text_file_path):
whole_text = []
with open(corpus_text_file_path, 'r') as corpus_file:
for each in corpus_file.readlines():
whole_text.append(each.strip())
return whole_text
def load_data(data_file_path):
df = pd.read_csv(data_file_path)
return df
def fit_tokenizer(num_words, corpus_text, tokenizer_path):
try:
tokenizer = load_tokenizer(tokenizer_path)
except:
tokenizer = Tokenizer(num_words=num_words, lower=True, filters='"#()*+-/:;<=>?@[\\]^_`{|}~\t\n')
# tokenizer = Tokenizer(num_words=num_words, lower=True)
tokenizer.fit_on_texts(corpus_text)
joblib.dump(tokenizer, tokenizer_path)
return tokenizer
def train_embedding_model(corpus_text_file_path, embedding_dim, embedding_model, embedding_model_file_path):
model_path = Path(embedding_model_file_path)
if model_path.is_file():
pass
else:
model = fasttext.train_unsupervised(input=corpus_text_file_path,
model=embedding_model,
dim=embedding_dim)
model.save_model(embedding_model_file_path)
return
def build_embedding_matrix(word_index, embedding_dim, embedding_model_path, embedding_matrix_path):
try:
embedding_matrix = load_embedding_matrix(embedding_matrix_path)
except:
embedding_model = fasttext.load_model(embedding_model_path)
embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim))
for word, i in word_index.items():
try:
embedding_matrix[i] = embedding_model.get_word_vector(word)
except:
embedding_matrix[i] = embedding_model.get_word_vector("unknown")
joblib.dump(embedding_matrix, embedding_matrix_path)
return embedding_matrix
def load_tokenizer(tokenizer_path):
return joblib.load(tokenizer_path)
def load_embedding_matrix(embedding_matrix_path):
return joblib.load(embedding_matrix_path)
def text_to_sequence_transformer(text_data, tokenizer):
return tokenizer.texts_to_sequences(text_data)
def padding_sequence_transformer(sequence_text_data, max_sequence_len):
return pad_sequences(sequence_text_data, maxlen=max_sequence_len)
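# Typical preprocessing flow built from the helpers above (paths and sizes are illustrative
# assumptions, not values used elsewhere in this module):
#   corpus = read_corpus_file("corpus.txt")
#   tokenizer = fit_tokenizer(num_words=20000, corpus_text=corpus, tokenizer_path="tokenizer.pkl")
#   sequences = text_to_sequence_transformer(corpus, tokenizer)
#   padded = padding_sequence_transformer(sequences, max_sequence_len=100)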
def save_result(prob_prediction, prediction_file_path):
result_df = pd.DataFrame(prob_prediction, columns=config.ASPECT_TARGET)
result_df.to_csv(prediction_file_path, index=False)
return
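# The next helper produces level-one training features for stacking: the training set is cut into
# 5 contiguous folds, a fresh model is fit on the other 4 folds, and its out-of-fold probabilities
# for the held-out fold are stacked row-wise, so every training row gets a prediction from a model
# that never saw it.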
def training_part_features_generation_for_stacking(model_name, X_train, y_train, embedding_matrix):
folds = 5
batch_size = len(X_train) / float(folds)
training_prediction = np.empty((0, len(config.ASPECT_TARGET)))
for i in range(folds):
_logger.info("Fold {} Model Traning...".format(i+1))
X_5fold_test = X_train[int(round(batch_size * i)):int(round(batch_size * (i+1)))]
y_5fold_test = y_train[int(round(batch_size * i)):int(round(batch_size * (i+1)))]
X_5fold_train = np.concatenate((X_train[:int(round(batch_size * i))],
X_train[int(round(batch_size * (i+1))):]))
y_5fold_train = np.concatenate((y_train[:int(round(batch_size * i))],
y_train[int(round(batch_size * (i+1))):]))
K.clear_session()
if model_name == config.MODEL1_NAME:
model = models.pooled_rnn_aspect_clf_for_fold
elif model_name == config.MODEL2_NAME:
model = models.pooled_rnn_text_cnn_aspect_clf_for_fold
model.set_params(embedding_matrix=embedding_matrix,
validation_data=(X_5fold_test, y_5fold_test))
model.fit(X_5fold_train, y_5fold_train)
fold_prediction = model.predict_proba(X_5fold_test)
training_prediction = np.concatenate((training_prediction, fold_prediction))
return training_prediction
def test_part_features_generation_for_stacking(model, X_train, y_train, X_test, y_test):
model.fit(X_train, y_train)
test_prediction = model.predict_proba(X_test)
return test_prediction
def read_predictions(selected_model_list, file_mapping):
predictions_to_be_concatenated = []
for model_name in selected_model_list:
df = pd.read_csv(file_mapping[model_name])
import pandas as pd
import os
# Load the data
df = pd.read_pickle('data_frame.pickle')
# Get distinct artist
artists = df['artist']
unique_artists = pd.unique(artists)
from simulationClasses import DCChargingStations, Taxi, Bus, BatterySwappingStation
import numpy as np
import pandas as pd
from scipy import stats, integrate
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.dates import DateFormatter, HourLocator, MinuteLocator, AutoDateLocator
import seaborn as sns
import csv
import sys
from datetime import datetime,date,timedelta
import random
from math import ceil
import math
sns.set_context("paper")
sns.set(font_scale=2)
sns.set_style("whitegrid", {
"font.family": "serif",
"font.serif": ["Times", "Palatino", "serif"],
'grid.color': '.9',
'grid.linestyle': '--',
})
taxiChargingStation = DCChargingStations(5)
taxiFleet =[]
for i in range(100):
newTaxi = Taxi()
newTaxi.useSwapping = 0
taxiFleet.append(newTaxi)
busChargingStation = DCChargingStations(5)
busFleet = []
for i in range(20):
newBus = Bus()
newBus.useSwapping = 0
busFleet.append(newBus)
time = 0
taxiIncome = []
busIncome = []
taxiChargerIncome = []
busChargerIncome = []
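# Main simulation loop for the plug-in charging scenario: one iteration per simulated minute over
# 7 days (24*60*7 steps). Each minute, running vehicles decide whether to charge, charging vehicles
# either return to service or keep charging, queued vehicles move onto free chargers, and fleet-wide
# income plus running/charging/waiting counts are logged for later plotting.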
while time < 24*60*7:
tempTaxiFleet = []
todayTaxiIncome = 0
todayBusIncome = 0
for runningTaxi in taxiFleet:
runningTaxi.decideChargeMode(time)
if runningTaxi.chargingMode == 1:
taxiChargingStation.addCharge(runningTaxi)
else:
runningTaxi.getTravelSpeed(time)
tempTaxiFleet.append(runningTaxi)
taxiFleet = tempTaxiFleet
tempChargingVehicles = []
for chargingTaxi in taxiChargingStation.chargingVehicles:
chargingTaxi.decideChargeMode(time)
if chargingTaxi.chargingMode == 0:
chargingTaxi.getTravelSpeed(time)
taxiFleet.append(chargingTaxi)
else:
chargingTaxi.charge(time,0,taxiChargingStation.chargeSpeed)
tempChargingVehicles.append(chargingTaxi)
taxiChargingStation.chargingVehicles = tempChargingVehicles
while taxiChargingStation.numberOfStations - len(taxiChargingStation.chargingVehicles) > 0:
if len(taxiChargingStation.pendingVehicles) > 0:
newChargeTaxi = taxiChargingStation.pendingVehicles.pop(0)
newChargeTaxi.charge(time,0,taxiChargingStation.chargeSpeed)
taxiChargingStation.chargingVehicles.append(newChargeTaxi)
else:
break
taxiChargingStation.charge()
tempBusFleet = []
for runningBus in busFleet:
runningBus.decideChargeMode(time)
if runningBus.chargingMode == 1:
busChargingStation.addCharge(runningBus)
else:
runningBus.getTravelSpeed(time)
tempBusFleet.append(runningBus)
busFleet = tempBusFleet
tempChargingVehicles = []
for chargingBus in busChargingStation.chargingVehicles:
chargingBus.decideChargeMode(time)
if chargingBus.chargingMode == 0:
chargingBus.getTravelSpeed(time)
busFleet.append(chargingBus)
else:
chargingBus.charge(time, 0, busChargingStation.chargeSpeed)
tempChargingVehicles.append(chargingBus)
busChargingStation.chargingVehicles = tempChargingVehicles
while busChargingStation.numberOfStations - len(busChargingStation.chargingVehicles) > 0:
if len(busChargingStation.pendingVehicles) > 0:
newChargeBus = busChargingStation.pendingVehicles.pop(0)
newChargeBus.charge(time, 0, busChargingStation.chargeSpeed)
busChargingStation.chargingVehicles.append(newChargeBus)
else:
break
busChargingStation.charge()
for taxi in taxiFleet + taxiChargingStation.chargingVehicles + taxiChargingStation.pendingVehicles:
todayTaxiIncome += taxi.income
for bus in busFleet + busChargingStation.chargingVehicles + busChargingStation.pendingVehicles:
todayBusIncome += bus.income
taxiIncome.append([time,todayTaxiIncome,len(taxiFleet),len(taxiChargingStation.chargingVehicles),len(taxiChargingStation.pendingVehicles)])
busIncome.append([time,todayBusIncome,len(busFleet),len(busChargingStation.chargingVehicles),len(busChargingStation.pendingVehicles)])
taxiChargerIncome.append([time,taxiChargingStation.income])
busChargerIncome.append([time, busChargingStation.income])
time += 1
taxiIncomeDataFrame = pd.DataFrame(taxiIncome,columns=["time","income","running","charging","waiting"])
busIncomeDataFrame = pd.DataFrame(busIncome,columns=["time","income","running","charging","waiting"])
taxiChargerIncomeDataFrame = pd.DataFrame(taxiChargerIncome,columns=["time","income"])
busChargerIncomeDataFrame = pd.DataFrame(busChargerIncome,columns=["time","income"])
plt.figure(figsize=(9, 16), dpi=1600)
ax = plt.subplot(4,1,1)
for day in range(7):
plt.axvspan(2*60 + day*60*24, 5*60 + day*60*24, facecolor='g', alpha=0.1)
plt.axvspan(18*60 + day*60*24, 21*60 + day*60*24, facecolor='r', alpha=0.1)
ax2 = plt.subplot(4,1,2)
ax3 = plt.subplot(4,1,3)
for day in range(7):
plt.axvspan(2*60 + day*60*24, 5*60 + day*60*24, facecolor='g', alpha=0.1)
plt.axvspan(18*60 + day*60*24, 21*60 + day*60*24, facecolor='r', alpha=0.1)
ax4 = plt.subplot(4,1,4)
taxiIncomeDataFrame.plot(x="time",y="income",ax=ax,label="")
taxiIncomeDataFrame.plot(x="time",y="running",ax=ax2,label="Running",style="-")
taxiIncomeDataFrame.plot(x="time",y="charging",ax=ax2,label="Charging", style=":")
taxiIncomeDataFrame.plot(x="time",y="waiting",ax=ax2,label="Waiting",style="-.")
busIncomeDataFrame.plot(x="time",y="income",ax=ax3,label="")
busIncomeDataFrame.plot(x="time",y="running",ax=ax4,label="Running",style="-")
busIncomeDataFrame.plot(x="time",y="charging",ax=ax4,label="Charging",style=":")
busIncomeDataFrame.plot(x="time",y="waiting",ax=ax4,label="Waiting",style="-.")
ax.set(ylabel= "Income ($)", xlabel='Time (min)')
ax.legend_.remove()
ax2.set(ylabel= "Number", xlabel='Time (min)')
ax3.set(ylabel= "Income ($)", xlabel='Time (min)')
ax3.legend_.remove()
ax4.set(ylabel= "Number", xlabel='Time (min)')
plt.tight_layout()
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.8, box.height])
box = ax4.get_position()
ax4.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax2.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax4.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig('busTaxiSimulationResult.pdf', bbox_inches='tight')
# print(taxiIncomeDataFrame)
print(busIncomeDataFrame)
#
# print(taxiIncomeDataFrame.sum())
# print(taxiIncomeDataFrame.sum()/24/7/60/100)
# print("tc1:")
# print(taxiIncomeDataFrame[(taxiIncomeDataFrame["time"]%(24*60) > 18*60) & (taxiIncomeDataFrame["time"]%(24*60) < 21*60)].sum()/60/3/7/100)
# print("tc2:")
# print(taxiIncomeDataFrame[(taxiIncomeDataFrame["time"]%(24*60) > 2*60) & (taxiIncomeDataFrame["time"]%(24*60) < 5*60)].sum()/60/3/7/100)
# print("tc3:")
# print(taxiIncomeDataFrame[(taxiIncomeDataFrame["time"]%(24*60) > 5*60) & (taxiIncomeDataFrame["time"]%(24*60) < 18*60)].sum()/60/13/7/100)
# print("tc4:")
# print(taxiIncomeDataFrame[(taxiIncomeDataFrame["time"]%(24*60) > 21*60) | (taxiIncomeDataFrame["time"]%(24*60) < 2*60)].sum()/60/5/7/100)
print(busIncomeDataFrame.sum())
print(busIncomeDataFrame.sum()/24/7/60/20)
print("tc1:")
print(busIncomeDataFrame[(busIncomeDataFrame["time"]%(24*60) > 18*60) & (busIncomeDataFrame["time"]%(24*60) < 21*60)].sum()/60/7/20)
print("tc2:")
print(busIncomeDataFrame[(busIncomeDataFrame["time"]%(24*60) > 2*60) & (busIncomeDataFrame["time"]%(24*60) < 5*60)].sum()/60/7/20)
print("tc3:")
print(busIncomeDataFrame[(busIncomeDataFrame["time"]%(24*60) > 5*60) & (busIncomeDataFrame["time"]%(24*60) < 18*60)].sum()/60/7/20)
print("tc4:")
print(busIncomeDataFrame[(busIncomeDataFrame["time"]%(24*60) > 21*60) | (busIncomeDataFrame["time"]%(24*60) < 2*60)].sum()/60/7/20)
taxiSwappingStation = BatterySwappingStation(5, 30)
taxiFleet =[]
for i in range(100):
newTaxi = Taxi()
newTaxi.useSwapping = 1
taxiFleet.append(newTaxi)
busSwappingStation = BatterySwappingStation(5, 324)
busFleet = []
for i in range(20):
newBus = Bus()
newBus.useSwapping = 1
busFleet.append(newBus)
time = 0
taxiIncome = []
busIncome = []
taxiSwapperIncome = []
busSwapperIncome = []
swapRecord = []
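# Battery-swapping scenario, mirroring the loop above: a vehicle requesting a swap either gets a
# slot immediately (addVehicle returns a positive swap result) or waits in pendingVehicles until a
# slot frees up; swapRecord stores the remaining battery level at the time of each taxi swap request.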
while time < 24*60*7:
tempTaxiFleet = []
todayTaxiIncome = 0
todayBusIncome = 0
taxiMileage = 0
for runningTaxi in taxiFleet:
runningTaxi.decideChargeMode(time)
if runningTaxi.chargingMode == 1:
result = taxiSwappingStation.addVehicle(runningTaxi)
swapRecord.append([time, runningTaxi.remainingBatterykWh])
if result > 0:
runningTaxi.charge(time,result,0)
# print("get into queue:" + str(time))
taxiSwappingStation.swappingVehicles.append(runningTaxi)
else:
runningTaxi.getTravelSpeed(time)
tempTaxiFleet.append(runningTaxi)
taxiFleet = tempTaxiFleet
tempSwappingVehicles = []
for swappingTaxi in taxiSwappingStation.swappingVehicles:
swappingTaxi.charge(time,0,0)
if swappingTaxi.chargingMode == 0:
swappingTaxi.getTravelSpeed(time)
taxiFleet.append(swappingTaxi)
else:
tempSwappingVehicles.append(swappingTaxi)
taxiSwappingStation.swappingVehicles = tempSwappingVehicles
while len(taxiSwappingStation.pendingVehicles):
if len(taxiSwappingStation.swappingVehicles) < taxiSwappingStation.numberOfSlot:
newTaxi = taxiSwappingStation.pendingVehicles.pop(0)
result = taxiSwappingStation.swap(newTaxi.remainingBatterykWh)
newTaxi.charge(time,result,0)
# print("bump from pending to swap:" + str(time))
taxiSwappingStation.swappingVehicles.append(newTaxi)
else:
break
tempBusFleet = []
for runningBus in busFleet:
runningBus.decideChargeMode(time)
if runningBus.chargingMode == 1:
result = busSwappingStation.addVehicle(runningBus)
if result > 0:
runningBus.charge(time, result, 0)
busSwappingStation.swappingVehicles.append(runningBus)
else:
runningBus.getTravelSpeed(time)
tempBusFleet.append(runningBus)
busFleet = tempBusFleet
tempSwappingVehicles = []
for swappingBus in busSwappingStation.swappingVehicles:
swappingBus.charge(time, 0, 0)
if swappingBus.chargingMode == 0:
swappingBus.getTravelSpeed(time)
busFleet.append(swappingBus)
else:
tempSwappingVehicles.append(swappingBus)
busSwappingStation.swappingVehicles = tempSwappingVehicles
while len(busSwappingStation.pendingVehicles) > 0:
if len(busSwappingStation.swappingVehicles) < busSwappingStation.numberOfSlot:
newBus = busSwappingStation.pendingVehicles.pop(0)
result = busSwappingStation.swap(newBus.remainingBatterykWh)
newBus.charge(time, result, 0)
busSwappingStation.swappingVehicles.append(newBus)
else:
break
for taxi in taxiFleet + taxiSwappingStation.swappingVehicles + taxiSwappingStation.pendingVehicles:
todayTaxiIncome += taxi.income
for bus in busFleet + busSwappingStation.swappingVehicles + busSwappingStation.pendingVehicles:
todayBusIncome += bus.income
taxiIncome.append([time,todayTaxiIncome,len(taxiFleet),len(taxiSwappingStation.swappingVehicles),len(taxiSwappingStation.pendingVehicles),\
len(taxiFleet)+len(taxiSwappingStation.swappingVehicles)+len(taxiSwappingStation.pendingVehicles)])
busIncome.append([time,todayBusIncome,len(busFleet),len(busSwappingStation.swappingVehicles),len(busSwappingStation.pendingVehicles), \
len(busFleet) + len(busSwappingStation.swappingVehicles) + len(busSwappingStation.pendingVehicles)])
taxiSwapperIncome.append([time, taxiSwappingStation.income])
busSwapperIncome.append([time, busSwappingStation.income])
time += 1
taxiIncomeDataFrame = pd.DataFrame(taxiIncome,columns=["time","income","running","swapping","waiting","total"])
busIncomeDataFrame = pd.DataFrame(busIncome,columns=["time","income","running","swapping","waiting","total"])
taxiSwapperIncomeDataFrame = pd.DataFrame(taxiSwapperIncome,columns=["time","income"])
#!/usr/bin/env python3
import abc
from functools import partial
from typing import Generator, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler, Normalizer, StandardScaler
from datafold.pcfold import TSCDataFrame
from datafold.utils.general import is_df_same_index, is_integer, series_if_applicable
class TSCMetric(object):
"""Compute metrics for time series collection data.
Parameters
----------
metrics
* "rmse" - root mean squared error,
* "rrmse" - relative root mean squared error
* "mse" - mean squared error,
* "mae" - mean absolute error,
* "max" maximum error,
* "l2" - Eucledian norm
mode
compute metric per "timeseries", "timestep" or "feature"
scaling
Prior scaling (useful for heterogeneous time series features).
* "id" - no scaling,
* "min-max" - each feature is scaled into (0, 1) range,
* "standard" - remove mean and scale to unit variance for each feature,
* "l2_normalize" - divide each feature by Euclidean norm
References
----------
"rrmse" is taken from :cite:`le_clainche_higher_2017`
"""
_cls_valid_modes = ["timeseries", "timestep", "feature"]
_cls_valid_metrics = ["rmse", "rrmse", "mse", "mape", "mae", "medae", "max", "l2"]
_cls_valid_scaling = ["id", "min-max", "standard", "l2_normalize"]
def __init__(self, metric: str, mode: str, scaling: str = "id"):
mode = mode.lower()
metric = metric.lower()
if metric in self._cls_valid_metrics:
self.metric = self._metric_from_str_input(metric)
else:
raise ValueError(
f"Invalid metric={mode}. Choose from {self._cls_valid_metrics}"
)
if mode in self._cls_valid_modes:
self.mode = mode
else:
raise ValueError(
f"Invalid mode='{mode}'. Choose from {self._cls_valid_modes}"
)
self.scaling = self._select_scaling(name=scaling)
def _select_scaling(self, name):
if name == "id":
return None
elif name == "min-max":
return MinMaxScaler()
elif name == "standard":
return StandardScaler()
elif name == "l2_normalize":
return Normalizer(norm="l2")
else:
raise ValueError(
f"scaling={name} is not known. Choose from {self._cls_valid_scaling}"
)
def _scaling(self, y_true: TSCDataFrame, y_pred: TSCDataFrame):
# it is checked before that y_true and y_pred indices/columns are identical
index, columns = y_true.index, y_true.columns
# first normalize y_true, afterwards (with the same factors from y_true!) y_pred
if self.scaling is not None: # is None if scaling is identity
y_true = self.scaling.fit_transform(y_true)
y_pred = self.scaling.transform(y_pred.to_numpy())
y_true = TSCDataFrame(y_true, index=index, columns=columns)
y_pred = TSCDataFrame(y_pred, index=index, columns=columns)
return y_true, y_pred
def _l2_metric(
self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
):
diff = y_true - y_pred
if sample_weight is not None:
diff = sample_weight[:, np.newaxis] * diff
l2_norm = np.linalg.norm(diff, axis=0)
if multioutput == "uniform_average":
l2_norm = np.mean(l2_norm)
return l2_norm
def _medae_metric(
self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
):
"""Median absolute error."""
if sample_weight is not None:
raise ValueError("Median absolute error does not support sample_weight.")
return metrics.median_absolute_error(
y_true=y_true, y_pred=y_pred, multioutput=multioutput
)
# def _mer_metric(
# self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
# ):
# r"""Mean error relative to mean observation
# Each time series must have the same length (corresponding to a prediction
# horizon).
#
# The error is taken from https://www.ijcai.org/Proceedings/2017/0277.pdf
#
# The MER is computed with
# .. math::
# \text{MER} = 100 \cdot \frac{1}{N} \sum_{i=1}^N
# \frac{\vert y - \hat{y} \vert}{\bar{y}}
# """
# # TODO: this metric shows a problem in the current setting
# # -- it does not fir in the metric_per_[timeseries|feature|timestep]
#
# if self.mode == "timestep":
# raise ValueError("Metric 'mean error relative to mean observation' does not "
# "support mode 'timestep'.")
#
# if sample_weight is not None:
# raise NotImplementedError("Sample weight is not implemented ")
#
# N = y_true.shape[0]
# error = (100 * 1 / N * ((y_true - y_pred).abs() / y_true.mean()).sum())
#
# if multioutput == "uniform_average":
# error = np.mean(error)
# return error
def _rrmse_metric(
self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
):
"""Metric from :cite:`le_clainche_higher_2017`"""
if multioutput == "uniform_average":
norm_ = np.sum(np.square(np.linalg.norm(y_true, axis=1)))
else: # multioutput == "raw_values":
norm_ = np.sum(np.square(y_true), axis=0)
if (np.asarray(norm_) <= 1e-14).any():
raise RuntimeError(
f"norm factor(s) are too small for rrmse \n norm_factor = {norm_}"
)
mse_error = metrics.mean_squared_error(
y_true, y_pred, sample_weight=sample_weight, multioutput=multioutput
)
mse_error_relative = np.divide(mse_error, norm_)
return np.sqrt(mse_error_relative)
def _max_error(
self, y_true, y_pred, sample_weight=None, multioutput="uniform_average"
):
"""Wrapper for :class:`sklean.metrics.max_error` to allow `sample_weight` and
`multioutput` arguments (both have not effect).
"""
# fails if y is multioutput
return metrics.max_error(y_true=y_true, y_pred=y_pred)
def _metric_from_str_input(self, error_metric: str):
error_metric = error_metric.lower()
from typing import Callable
error_metric_handle: Callable
if error_metric == "rmse": # root mean squared error
error_metric_handle = partial(metrics.mean_squared_error, squared=False)
elif error_metric == "rrmse": # relative root mean squared error
error_metric_handle = self._rrmse_metric # type: ignore
elif error_metric == "mape": # mean absolute percentage error
error_metric_handle = metrics.mean_absolute_percentage_error # type: ignore
elif error_metric == "mse":
error_metric_handle = metrics.mean_squared_error
elif error_metric == "mae":
error_metric_handle = metrics.mean_absolute_error
elif error_metric == "medae": # median absolute error
error_metric_handle = self._medae_metric
elif error_metric == "max":
error_metric_handle = self._max_error
elif error_metric == "l2":
error_metric_handle = self._l2_metric
else:
raise ValueError(f"Metric {error_metric} not known. Please report bug.")
return error_metric_handle
def _is_scalar_multioutput(self, multioutput) -> bool:
# Return True if there is only one column (because features are averaged)
if (
isinstance(multioutput, str) and multioutput == "uniform_average"
) or isinstance(multioutput, np.ndarray):
# array -> average with weights
scalar_score = True
elif multioutput == "raw_values":
scalar_score = False
else:
raise ValueError(f"Illegal argument multioutput='{multioutput}'")
return scalar_score
def _single_column_name(self, multioutput) -> list:
assert self._is_scalar_multioutput(multioutput)
if isinstance(multioutput, str) and multioutput == "uniform_average":
column = ["metric_uniform_average"]
elif isinstance(multioutput, np.ndarray):
column = ["metric_user_weights"]
else:
raise ValueError(f"Illegal argument of multioutput={multioutput}")
return column
def _metric_per_timeseries(
self,
y_true: TSCDataFrame,
y_pred: TSCDataFrame,
sample_weight=None,
multioutput="uniform_average",
) -> Union[pd.Series, pd.DataFrame]:
if sample_weight is not None:
# same length of time series to have mapping
# sample_weight -> time step of time series (can be a different time value)
y_true.tsc.check_timeseries_same_length()
if sample_weight.shape[0] != y_true.n_timesteps:
raise ValueError(
f"'sample_weight' length (={len(sample_weight)}) "
f"does not match the number of time steps (={y_true.n_timesteps})"
)
if self._is_scalar_multioutput(multioutput=multioutput):
column = self._single_column_name(multioutput=multioutput)
# Make in both cases a DataFrame and later convert to Series in the scalar
# case this allows to use .loc[i, :] in the loop
error_per_timeseries = pd.DataFrame(
np.nan, index=y_true.ids, columns=column
)
else:
error_per_timeseries = pd.DataFrame(
np.nan,
index=y_true.ids,
columns=y_true.columns.to_list(),
)
for i, y_true_single in y_true.itertimeseries():
y_pred_single = y_pred.loc[i, :]
error_per_timeseries.loc[i, :] = self.metric(
y_true_single,
y_pred_single,
sample_weight=sample_weight,
multioutput=multioutput,
)
return series_if_applicable(error_per_timeseries)
def _metric_per_feature(
self,
y_true: TSCDataFrame,
y_pred: TSCDataFrame,
sample_weight=None,
multioutput="raw_values",
):
# Note: score per feature is never a multioutput-average, because a feature is
# seen as a scalar quantity
if sample_weight is not None:
if sample_weight.shape[0] != y_true.shape[0]:
raise ValueError(
f"'sample_weight' length (={sample_weight.shape[0]}) "
f"does not match the number of feature values "
f"(y.shape[0]={y_true.shape[0]})"
)
metric_per_feature = self.metric(
y_true.to_numpy(),
y_pred.to_numpy(),
sample_weight=sample_weight,
multioutput="raw_values", # raw_values to tread every feature separately
)
metric_per_feature = pd.Series(
metric_per_feature,
index=y_true.columns,
)
return metric_per_feature
def _metric_per_timestep(
self,
y_true: TSCDataFrame,
y_pred: TSCDataFrame,
sample_weight=None,
multioutput="uniform_average",
):
if sample_weight is not None:
# sample weights -> each time series has a different weight
# Currently, all time series must have the same time values to have the same
# length for each time step
y_true.tsc.check_timeseries_same_length()
# the weight, must be as long as the time series
if sample_weight.shape[0] != y_true.n_timeseries:
raise ValueError(
f"'sample_weight' shape (={sample_weight.shape[0]}) "
f"does not match the number of time series (={y_true.n_timeseries})."
)
time_indices = pd.Index(y_true.time_values(), name="time")
if self._is_scalar_multioutput(multioutput=multioutput):
column = self._single_column_name(multioutput=multioutput)
# Make in both cases a DataFrame and later convert to Series in the scalar
# case this allows to use .loc[i, :] in the loop
metric_per_time = pd.DataFrame(np.nan, index=time_indices, columns=column)
else:
metric_per_time = pd.DataFrame(
np.nan, index=time_indices, columns=y_true.columns.to_list()
)
metric_per_time.index = metric_per_time.index.set_names(
TSCDataFrame.tsc_time_idx_name
)
idx_slice = pd.IndexSlice
for t in time_indices:
y_true_t = pd.DataFrame(y_true.loc[idx_slice[:, t], :])
y_pred_t = pd.DataFrame(y_pred.loc[idx_slice[:, t], :])
metric_per_time.loc[t, :] = self.metric(
y_true_t,
y_pred_t,
sample_weight=sample_weight,
multioutput=multioutput,
)
return series_if_applicable(metric_per_time)
def __call__(
self,
y_true: TSCDataFrame,
y_pred: TSCDataFrame,
sample_weight: Optional[np.ndarray] = None,
multioutput: Union[str, np.ndarray] = "raw_values",
) -> Union[pd.Series, pd.DataFrame]:
"""Compute metric between two time series collections.
Parameters
----------
y_true
Ground truth time series collection (basis for scaling), of shape
`(n_samples, n_features)`.
y_pred
Predicted time series (the same scaling as for `y_true` will be applied),
with exact same index (`ID` and `time` and columns as `y_true`).
sample_weight
Gives samples individual weights depending on the `mode`.
* `mode=timeseries` array of shape `(n_timesteps,)`. Each time step has a \
different weight (note that time values can be different).
* `mode=feature` array of shape `(n_samples,)`. Each feature sample has a \
different weight.
* `mode=timestep` array of shape `(n_timeseries,)`. Each time series has \
a different weight.
multioutput
Handling of metric if evaluated over multiple features (columns). Specify
how to weight each feature. The parameter is ignored for `mode=feature`,
because each feature is a single output.
* "raw_values" - returns metric per feature (i.e., the metric is not reduced)
* "uniform_average" - returns metric averaged over all features averaged with
uniform weight
* ``numpy.ndarray`` of shape `(n_features,)` - returns metric for all \
features averaged with specified weights
Returns
-------
Union[pd.Series, pandas.DataFrame]
metric evaluations, `pandas.DataFrame` for `multioutput=raw_values`
Raises
------
TSCException
If not all values are finite in `y_true` or `y_pred` or if \
:class:`TSCDataFrame` properties do not allow for a `sample_weight` argument.
"""
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim != 1:
raise ValueError("'sample_weight' must be an 1-dim. array")
# checks:
y_true.tsc.check_finite()
y_pred.tsc.check_finite()
if not is_df_same_index(
y_true, y_pred, check_index=True, check_column=True, handle="return"
):
raise ValueError("Indices between 'y_pred' and 'y_true' must be equal.")
# scaling:
y_true, y_pred = self._scaling(y_true=y_true, y_pred=y_pred)
# compute metric depending on mode:
if self.mode == "timeseries":
metric_result = self._metric_per_timeseries(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput=multioutput,
)
elif self.mode == "timestep":
metric_result = self._metric_per_timestep(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput=multioutput,
)
elif self.mode == "feature":
metric_result = self._metric_per_feature(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="raw_values",
)
else:
raise ValueError(f"Invalid mode={self.mode}. Please report bug.")
if isinstance(metric_result, pd.Series):
assert not metric_result.isnull().any()
elif isinstance(metric_result, pd.DataFrame):
assert not metric_result.isnull().any().any()
else:
raise RuntimeError(
f"Unknown return type {type(metric_result)}. Please report bug."
)
return metric_result
class TSCScoring(object):
"""Create scoring function from :class:`.TSCMetric`.
Parameters
----------
tsc_metric
Time series collections metric.
greater_is_better
If True, the metric measures accuracy, else the metric measures the error.
**metric_kwargs
keyword arguments "sample_weight" and "multioutput" for
:py:meth:`TSCMetric.__call__`
Notes
-----
According to scikit-learn a `score` is a scalar value where higher values are \
better than lower return values \
(`ref <https://scikit-learn.org/stable/modules/model_evaluation.html>`_). This means:
* Usually :class:`.TSCMetric` returns a vector metric with multiple components
(metric per time series, timestep or feature). Therefore, the metric values
must be "compressed" again to obtain a single score value.
* Currently, all metrics measure the error, to comply with "higher score values
are better" the metric values are negated.
"""
def __init__(self, tsc_metric: TSCMetric, greater_is_better=False, **metric_kwargs):
self.tsc_metric = tsc_metric
self.metric_kwargs = metric_kwargs
self.greater_is_better = greater_is_better
def __call__(
self,
y_true: TSCDataFrame,
y_pred: TSCDataFrame,
sample_weight: Optional[np.ndarray] = None,
) -> float:
"""Computes score between two time series collections.
Parameters
----------
y_true
Ground truth time series data.
y_pred
Predicted time series data.
sample_weight
Not to be confused with parameter `samples_weight` in
:py:meth:`TSCMetric.__call__`.
The metric values (usually multiple values, depending on mode) can be weighted
for the score:
* `TSCMetric.mode=feature` - weight array of shape `(n_feature,)`
* `TSCMetric.mode=timeseries` - weight array of shape `(n_timeseries,)`
* `TSCMetric.mode=time` - weight array of shape `(n_timesteps,)`
Returns
-------
:class:`float`
score
"""
eval_tsc_metric: pd.Series = self.tsc_metric(
y_true=y_true,
y_pred=y_pred,
sample_weight=self.metric_kwargs.get("sample_weight", None),
multioutput=self.metric_kwargs.get("multioutput", "uniform_average"),
)
eval_tsc_metric = series_if_applicable(eval_tsc_metric)
if isinstance(eval_tsc_metric, pd.DataFrame):
raise ValueError(
"The TSCMetric must be configured that multioutputs (multiple feature "
"columns) are weighted. Provide in 'multioutput' a string 'uniform' or "
"an array with individual weights. "
)
if sample_weight is None:
score = np.mean(eval_tsc_metric.to_numpy())
elif isinstance(sample_weight, np.ndarray):
assert len(sample_weight) == len(eval_tsc_metric)
score = np.average(eval_tsc_metric.to_numpy(), weights=sample_weight)
else:
raise TypeError(f"sample_weight={sample_weight} is invalid.")
if self.greater_is_better:
factor = 1
else:
factor = -1
return factor * float(score)
class TSCCrossValidationSplit(metaclass=abc.ABCMeta):
"""Abstract base class for cross validation splits for time series data.
This class mimics `BaseCrossValidator
<https://github.com/scikit-learn/scikit-learn/blob/2beed55847ee70d363bdbfe14ee4401438fba057/sklearn/model_selection/_split.py#L49>`__
(undocumented) from scikit-learn.
See sub-classes for details.
"""
@abc.abstractmethod
def split(self, X: TSCDataFrame, y=None, groups=None):
raise NotImplementedError("base class")
@abc.abstractmethod
def get_n_splits(self, X: Optional[TSCDataFrame] = None, y=None, groups=None):
raise NotImplementedError("base class")
class TSCKfoldSeries(TSCCrossValidationSplit):
"""K-fold splits on entire time series.
Both the training and the test set consist of time series in its original length.
Therefore, to perform the split, the time series collection must consist of
multiple time series.
Parameters
----------
n_splits
The number of splits.
shuffle
If True, the time series are shuffled.
random_state
Use fixed seed if `shuffle=True`.
"""
def __init__(
self, n_splits=3, shuffle: bool = False, random_state: Optional[int] = None
):
self.kfold_splitter = KFold(
n_splits=n_splits, shuffle=shuffle, random_state=random_state
)
def split(
self, X: TSCDataFrame, y=None, groups=None
) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
"""Yields k-folds of training and test indices of time series collection.
Parameters
----------
X
The time series collection to split.
y: None
ignored
groups: None
ignored
Yields
------
numpy.ndarray
train indices
numpy.ndarray
test indices
Raises
------
NotImplementedError
If time series have not equal length.
"""
if not X.is_equal_length():
raise NotImplementedError(
"Currently, all time series are required to have the same length for "
"this method. This can be generalized, contributions welcome."
)
n_time_series = X.n_timeseries
len_time_series = X.n_timesteps
n_samples = X.shape[0]
indices_matrix = np.arange(n_samples).reshape([n_time_series, len_time_series])
# uses the indices as samples and splits along the time series
# the indices (rows) are then collected and can be used to select from X
for train, test in self.kfold_splitter.split(indices_matrix):
train_indices = indices_matrix[train].flatten()
test_indices = indices_matrix[test].flatten()
yield np.sort(train_indices), np.sort(test_indices)
def get_n_splits(self, X=None, y=None, groups=None) -> int:
"""Number of splits, which are also the number of cross-validation iterations.
All parameters are ignored to align with scikit-learn's function.
Parameters
----------
X
ignored
y
ignored
groups
ignored
Returns
-------
"""
return self.kfold_splitter.get_n_splits(X=X, y=y, groups=groups)
class TSCKFoldTime(TSCCrossValidationSplit):
"""K-fold splits on time values.
The splits are along the time axis. This means that the time series collection can
also consist of only a single time series. Note that if a block is taken from
testing, then this results in more training series as in the original time series
collection. For example, for a single time series this would result in two training
time series and one test time series.
Parameters
----------
n_splits
The number of splits.
"""
def __init__(self, n_splits: int = 3):
self.kfold_splitter = KFold(n_splits=n_splits, shuffle=False, random_state=None)
def split(self, X: TSCDataFrame, y=None, groups=None):
"""Yields k-folds of training and test indices of time series collection.
Parameters
----------
X
data to split
y: None
ignored
groups: None
ignored
Yields
------
numpy.ndarray
train indices
numpy.ndarray
test indices
"""
if not X.is_same_time_values():
raise NotImplementedError(
"Currently, each time series must have the same time indices."
)
n_samples = X.shape[0]
indices_matrix = np.arange(n_samples).reshape(
[X.n_timesteps, X.n_timeseries], order="F"
)
for train, test in self.kfold_splitter.split(indices_matrix):
train_indices = indices_matrix[train].flatten()
test_indices = indices_matrix[test].flatten()
yield np.sort(train_indices), np.sort(test_indices)
def get_n_splits(self, X=None, y=None, groups=None) -> int:
"""Number of splits, which are also the number of cross-validation iterations.
All parameters are ignored to align with scikit-learn's function.
Parameters
----------
X
ignored
y
ignored
groups
ignored
Returns
-------
"""
return self.kfold_splitter.get_n_splits(X=X, y=y, groups=groups)
class TSCWindowFoldTime(TSCCrossValidationSplit):
"""Assign windows of test samples starting from the end of the time series collection.
This method is useful for time series collections with gaps (time intervals of no
data). Specifically, the time series' time values should not overlap and be in
ordered with time (e.g. time series ID 0 should not start after ID 1).
The windows are set with these rules:
* The iteration is in reverse order, i.e., the first window for testing is set in
the last ID with the respective last time samples. The benefit is that when the
number of splits are reduced, then a optimization procedure uses the latest time
samples. This window has the most predictive power because it contains the most
recent samples.
* The window is always of the same size and only placed within a time series (i.e.
no overlapping. If the next window within a time series cannot be placed,
then these samples will not be included in any test set.
* If a time series has less samples than ``test_window_length``, then this time
series will not be considered for testing.
Parameters
----------
test_window_length
The length of a window for samples included in testing.
window_offset
The offset to the next possible test window. In a single long time series the offset
equals the gap between windows.
train_min_timesteps
The minimum number of time steps required for training. If a time series has
less samples than the required minimum (e.g. because the time series is split
due to a test window) then these time series are dropped.
"""
def __init__(
self,
test_window_length: int,
window_offset: int = 0,
train_min_timesteps: Optional[int] = None,
):
if not is_integer(test_window_length) or test_window_length <= 0:
raise ValueError(
f"The parameter 'test_window_length={test_window_length}' must be a "
f"positive integer."
)
if not is_integer(window_offset) or window_offset < 0:
raise ValueError(
f"The parameter 'window_offset={window_offset}' must be a "
f"non-negative integer."
)
if train_min_timesteps is not None and (
not is_integer(train_min_timesteps) or train_min_timesteps <= 0
):
raise ValueError(
f"The parameter 'train_min_timesteps={train_min_timesteps}' "
f"must be a positive integer."
)
# parse to Python built-in integer (e.g. in case it is a numpy.integer)
self.test_window_length = int(test_window_length)
self.minimum_n_timesteps = (
int(train_min_timesteps) if train_min_timesteps is not None else None
)
self.test_offset = int(window_offset)
def _reversed_ids_and_indices_tsc(self, X: TSCDataFrame):
"""Create an TSCDataFrame with reversed order of samples and ids.
This is used internally to select the samples for training and testing.
"""
max_id = np.max(X.ids)
time_series_ids = np.abs(
X.index.get_level_values(TSCDataFrame.tsc_id_idx_name) - max_id
)
idx = pd.MultiIndex.from_arrays(
[np.sort(time_series_ids), np.arange(X.shape[0])]
)
indices_tsc = TSCDataFrame(index=idx)
indices_tsc.loc[:, "indices"] = np.arange(X.shape[0])[::-1]
return indices_tsc
def split(self, X: TSCDataFrame, y=None, groups=None):
"""Yield windows of indices for training and testing for a time series
collection with non-overlapping time series.
Parameters
----------
X
The data to split.
y
ignored
groups
ignored
Yields
------
numpy.ndarray
train indices
numpy.ndarray
test indices
"""
if np.asarray(X.n_timesteps < self.test_window_length).all():
raise ValueError(
"All time series are shorter than the set "
f"'test_window_length={self.test_window_length}'"
)
indices_tsc = self._reversed_ids_and_indices_tsc(X)
X.tsc.check_non_overlapping_timeseries()
X.tsc.check_const_time_delta()
for test_tsc in indices_tsc.copy().tsc.iter_timevalue_window(
window_size=self.test_window_length,
offset=self.test_window_length + self.test_offset,
per_time_series=True,
):
train_tsc = indices_tsc.copy().drop(test_tsc.index, axis=0)
# It is important to reassign the IDs to keep the same sub-sampling and to
# assign two IDs if the test window falls somewhere in the middle of
# a longer time series.
train_tsc, test_tsc = indices_tsc.copy().tsc.assign_ids_train_test(
train_indices=train_tsc.time_values(),
test_indices=test_tsc.time_values(),
)
if self.minimum_n_timesteps is not None:
# see issue #106
# https://gitlab.com/datafold-dev/datafold/-/issues/106
n_timesteps = train_tsc.n_timesteps
if isinstance(n_timesteps, pd.Series):
drop_ids = n_timesteps[
train_tsc.n_timesteps < self.minimum_n_timesteps
].index
if len(drop_ids) > 0:
train_tsc = train_tsc.drop(
drop_ids if len(drop_ids) > 0 else None, level=0
)
else:
if n_timesteps < self.minimum_n_timesteps:
train_tsc = pd.DataFrame()
"""
Tests that rely on a server running
"""
import base64
import json
import datetime
import os
from unittest import mock
import pytest
from heavydb import connect, ProgrammingError, DatabaseError
from heavydb.cursor import Cursor
from heavydb._parsers import Description, ColumnDetails
from heavydb.thrift.ttypes import TDBException
from heavydb.common.ttypes import TDatumType
import geopandas as gpd
import pandas as pd
import numpy as np
import pyarrow as pa
from pandas.api.types import is_object_dtype, is_categorical_dtype
import pandas.testing as tm
import shapely
from shapely.geometry import Point, LineString, Polygon, MultiPolygon
import textwrap
from .conftest import no_gpu
from .data import dashboard_metadata
heavydb_host = os.environ.get('HEAVYDB_HOST', 'localhost')
# XXX: Make it hashable to silence warnings; see if this can be done upstream
# This isn't a huge deal, but our testing context mangers for asserting
# exceptions need hashability
TDBException.__hash__ = id
def _cursor2df(cursor):
col_types = {c.name: c.type_code for c in cursor.description}
has_geodata = {
k: v
in [
TDatumType.POINT,
TDatumType.LINESTRING,
TDatumType.POLYGON,
TDatumType.MULTIPOLYGON,
]
for k, v in col_types.items()
}
col_names = list(col_types.keys())
df_class = gpd.GeoDataFrame if any(has_geodata.values()) else pd.DataFrame
df = df_class(cursor.fetchall(), columns=col_names)
for c, _has_geodata in has_geodata.items():
if _has_geodata:
df.loc[:, c] = df.loc[:, c].apply(shapely.wkt.loads)
return df
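# Usage sketch (assumed): after cur.execute("select ..."), _cursor2df(cur) materializes the result
# set into a pandas or GeoPandas DataFrame, parsing WKT strings into shapely geometries whenever a
# column is typed as POINT/LINESTRING/POLYGON/MULTIPOLYGON.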
@pytest.mark.usefixtures("mapd_server")
class TestIntegration:
def test_connect_binary(self):
con = connect(
user="admin",
password='<PASSWORD>',
host=heavydb_host,
port=6274,
protocol='binary',
dbname='omnisci',
)
assert con is not None
def test_connect_http(self):
con = connect(
user="admin",
password='<PASSWORD>',
host=heavydb_host,
port=6278,
protocol='http',
dbname='omnisci',
)
assert con is not None
def test_connect_uri(self):
uri = (
'heavydb://admin:HyperInteractive@{0}:6274/omnisci?'
'protocol=binary'.format(heavydb_host)
)
con = connect(uri=uri)
assert con._user == 'admin'
assert con._password == '<PASSWORD>'
assert con._host == heavydb_host
assert con._port == 6274
assert con._dbname == 'omnisci'
assert con._protocol == 'binary'
def test_connect_uri_and_others_raises(self):
uri = (
'heavydb://admin:HyperInteractive@{0}:6274/heavyai?'
'protocol=binary'.format(heavydb_host)
)
with pytest.raises(TypeError):
connect(username='heavydb', uri=uri)
def test_invalid_sql(self, con):
with pytest.raises(ProgrammingError) as r:
con.cursor().execute("this is invalid;")
return r.match("SQL Error:")
def test_nonexistant_table(self, con):
with pytest.raises(DatabaseError) as r:
con.cursor().execute("select it from fake_table;")
r.match("Table 'FAKE_TABLE' does not exist|Object 'fake_table' not")
def test_connection_execute(self, con):
result = con.execute("drop table if exists FOO;")
result = con.execute("create table FOO (a int);")
assert isinstance(result, Cursor)
con.execute("drop table if exists FOO;")
def test_select_sets_description(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select * from stocks")
expected = [
Description('date_', 6, None, None, None, None, True),
Description('trans', 6, None, None, None, None, True),
Description('symbol', 6, None, None, None, None, True),
Description('qty', 1, None, None, None, None, True),
Description('price', 3, None, None, None, None, True),
Description('vol', 3, None, None, None, None, True),
]
assert c.description == expected
c.execute('drop table if exists stocks;')
def test_select_parametrized(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute(
'select symbol, qty from stocks where symbol = :symbol',
{'symbol': 'GOOG'},
)
result = list(c)
expected = [
('GOOG', 100),
] # noqa
assert result == expected
c.execute('drop table if exists stocks;')
def test_executemany_parametrized(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
parameters = [{'symbol': 'GOOG'}, {'symbol': "RHAT"}]
expected = [[('GOOG', 100)], [('RHAT', 100)]]
query = 'select symbol, qty from stocks where symbol = :symbol'
c = con.cursor()
result = c.executemany(query, parameters)
assert result == expected
c.execute('drop table if exists stocks;')
def test_executemany_parametrized_insert(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c = con.cursor()
c.execute("drop table if exists stocks2;")
# Create table
c.execute('CREATE TABLE stocks2 (symbol text, qty int);')
params = [{"symbol": "GOOG", "qty": 10}, {"symbol": "AAPL", "qty": 20}]
query = "INSERT INTO stocks2 VALUES (:symbol, :qty);"
result = c.executemany(query, params)
assert result == [[], []] # TODO: not sure if this is standard
c.execute("drop table stocks2;")
c.execute('drop table if exists stocks;')
@pytest.mark.parametrize(
'query, parameters',
[
('select qty, price from stocks', None),
('select qty, price from stocks where qty=:qty', {'qty': 100}),
],
)
def test_select_ipc_parametrized(self, con, query, parameters):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc(query, parameters=parameters)
expected = pd.DataFrame(
{
"qty": np.array([100, 100], dtype=np.int32),
"price": np.array(
[35.13999938964844, 12.140000343322754], dtype=np.float32
),
}
)[['qty', 'price']]
tm.assert_frame_equal(result, expected)
c.execute('drop table if exists stocks;')
def test_select_ipc_first_n(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc("select * from stocks", first_n=1)
assert len(result) == 1
c.execute('drop table if exists stocks;')
@pytest.mark.parametrize(
'query, parameters',
[
('select qty, price from stocks', None),
('select qty, price from stocks where qty=:qty', {'qty': 100}),
],
)
@pytest.mark.skipif(no_gpu(), reason="No GPU available")
def test_select_ipc_gpu(self, con, query, parameters):
from cudf.core.dataframe import DataFrame
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc_gpu("select qty, price from stocks")
assert isinstance(result, DataFrame)
dtypes = dict(qty=np.int32, price=np.float32)
expected = pd.DataFrame(
[[100, 35.14], [100, 12.14]], columns=['qty', 'price']
).astype(dtypes)
result = result.to_pandas()[['qty', 'price']] # column order
pd.testing.assert_frame_equal(result, expected)
c.execute('drop table if exists stocks;')
@pytest.mark.skipif(no_gpu(), reason="No GPU available")
def test_select_text_ipc_gpu(self, con):
from cudf.core.dataframe import DataFrame
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
symbols = set(['GOOG', 'RHAT', 'IBM', 'NVDA'])
for i, sym in enumerate(symbols):
stmt = "INSERT INTO stocks VALUES ('2006-01-05_{}','BUY','{}',{},35.{},{}.1);".format( # noqa
i, sym, i, i, i
) # noqa
# insert twice so we can test
# that duplicated text values
# are deserialized properly
c.execute(stmt)
c.execute(stmt)
result = con.select_ipc_gpu(
"select trans, symbol, qty, price from stocks"
) # noqa
assert isinstance(result, DataFrame)
assert len(result) == 8
assert set(result['trans'].to_pandas()) == set(["BUY"])
assert set(result['symbol'].to_pandas()) == symbols
c.execute('drop table if exists stocks;')
@pytest.mark.skipif(no_gpu(), reason="No GPU available")
def test_select_gpu_first_n(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.select_ipc_gpu("select * from stocks", first_n=1)
assert len(result) == 1
c.execute('drop table if exists stocks;')
def test_fetchone(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select symbol, qty from stocks")
result = c.fetchone()
expected = ('RHAT', 100)
assert result == expected
c.execute('drop table if exists stocks;')
def test_fetchmany(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select symbol, qty from stocks")
result = c.fetchmany()
expected = [('RHAT', 100)]
assert result == expected
c.execute("select symbol, qty from stocks")
result = c.fetchmany(size=10)
expected = [('RHAT', 100), ('GOOG', 100)]
assert result == expected
c.execute('drop table if exists stocks;')
def test_select_dates(self, con):
c = con.cursor()
c.execute('drop table if exists dates;')
c.execute(
'create table dates (date_ DATE, datetime_ TIMESTAMP, '
'time_ TIME);'
)
i1 = (
"INSERT INTO dates VALUES ('2006-01-05','2006-01-01T12:00:00',"
"'12:00:00');"
)
i2 = (
"INSERT INTO dates VALUES ('1901-12-14','1901-12-13T20:45:53',"
"'23:59:00');"
)
c.execute(i1)
c.execute(i2)
result = list(c.execute("select * from dates"))
expected = [
(
datetime.date(2006, 1, 5),
datetime.datetime(2006, 1, 1, 12),
datetime.time(12),
),
(
datetime.date(1901, 12, 14),
datetime.datetime(1901, 12, 13, 20, 45, 53),
datetime.time(23, 59),
),
]
assert result == expected
c.execute('drop table if exists dates;')
class TestOptionalImports:
def test_select_gpu(self, con):
with mock.patch.dict(
"sys.modules", {"cudf": None, "cudf.core.dataframe": None}
):
with pytest.raises(ImportError) as m:
con.select_ipc_gpu("select * from foo;")
assert m.match("The 'cudf' package is required")
class TestExtras:
def test_get_tables(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
result = con.get_tables()
assert isinstance(result, list)
assert 'stocks' in result
c.execute('drop table if exists stocks;')
def test_get_table_details(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float, '
'exchanges TEXT [] ENCODING DICT(32));'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1,{'NYSE', 'NASDAQ', 'AMEX'});" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2,{'NYSE', 'NASDAQ'});" # noqa
c.execute(i1)
c.execute(i2)
result = con.get_table_details('stocks')
expected = [
ColumnDetails(
name='date_',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='trans',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='symbol',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='qty',
type='INT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
ColumnDetails(
name='price',
type='FLOAT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
ColumnDetails(
name='vol',
type='FLOAT',
nullable=True,
precision=0,
scale=0,
comp_param=0,
encoding='NONE',
is_array=False,
),
ColumnDetails(
name='exchanges',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=True,
),
]
assert result == expected
c.execute('drop table if exists stocks;')
class TestLoaders:
@staticmethod
def check_empty_insert(result, expected):
assert len(result) == 3
assert expected[0][0] == result[0][0]
assert expected[0][2] == result[0][2]
assert abs(expected[0][1] - result[0][1]) < 1e-7 # floating point
def test_load_empty_table(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = [(1, 1.1, 'a'), (2, 2.2, '2'), (3, 3.3, '3')]
con.load_table("baz", data)
result = sorted(con.execute("select * from baz"))
self.check_empty_insert(result, data)
con.execute("drop table if exists baz;")
def test_load_empty_table_pandas(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = [(1, 1.1, 'a'), (2, 2.2, '2'), (3, 3.3, '3')]
df = pd.DataFrame(data, columns=list('abc'))
con.load_table("baz", df, method='columnar')
result = sorted(con.execute("select * from baz"))
self.check_empty_insert(result, data)
con.execute("drop table if exists baz;")
def test_load_empty_table_arrow(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = [(1, 1.1, 'a'), (2, 2.2, '2'), (3, 3.3, '3')]
df = pd.DataFrame(data, columns=list('abc')).astype(
{'a': 'int32', 'b': 'float32'}
)
table = pa.Table.from_pandas(df, preserve_index=False)
con.load_table("baz", table, method='arrow')
result = sorted(con.execute("select * from baz"))
self.check_empty_insert(result, data)
con.execute("drop table if exists baz;")
@pytest.mark.parametrize(
'df, table_fields',
[
pytest.param(
pd.DataFrame(
{
"a": [1, 2, 3],
"b": [1.1, 2.2, 3.3],
"c": ['a', '2', '3'],
},
),
'a int, b float, c text',
id='scalar_values',
),
pytest.param(
pd.DataFrame(
{
"a": [
np.datetime64('2010-01-01 01:01:01.001001001'),
np.datetime64('2011-01-01 01:01:01.001001001'),
np.datetime64('2012-01-01 01:01:01.001001001'),
],
},
),
'a TIMESTAMP(9)',
id='scalar_datetime_nanoseconds',
),
pytest.param(
pd.DataFrame(
{
"a": [
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e3
),
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e3
),
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e3
),
],
},
),
'a TIMESTAMP(3)',
id='scalar_datetime_ms',
),
pytest.param(
pd.DataFrame(
{
"a": [
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e6
),
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e6
),
datetime.datetime.fromtimestamp(
float(1600443582510) / 1e6
),
],
},
),
'a TIMESTAMP(6)',
id='scalar_datetime_us',
),
pytest.param(
pd.DataFrame(
[
{'ary': [2, 3, 4]},
{'ary': [4444]},
{'ary': []},
{'ary': None},
{'ary': [2, 3, 4]},
]
),
'ary INT[]',
id='array_values',
),
pytest.param(
pd.DataFrame(
[
{'ary': [2, 3, 4], 'strtest': 'teststr'},
{'ary': None, 'strtest': 'teststr'},
{'ary': [4444], 'strtest': 'teststr'},
{'ary': [], 'strtest': 'teststr'},
{'ary': [2, 3, 4], 'strtest': 'teststr'},
]
),
'ary INT[], strtest TEXT',
id='mix_scalar_array_values_with_none_and_empty_list',
),
pytest.param(
gpd.GeoDataFrame(
{
'a': [Point(0, 0), Point(1, 1)],
'b': [
LineString([(2, 0), (2, 4), (3, 4)]),
LineString([(0, 0), (1, 1)]),
],
'c': [
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
Polygon([(0, 0), (4, 0), (4, 4), (0, 4), (0, 0)]),
],
'd': [
MultiPolygon(
[
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
Polygon(
[
(0, 0),
(4, 0),
(4, 4),
(0, 4),
(0, 0),
]
),
]
),
MultiPolygon(
[
Polygon(
[
(0, 0),
(4, 0),
(4, 4),
(0, 4),
(0, 0),
]
),
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
]
),
],
}
),
'a POINT, b LINESTRING, c POLYGON, d MULTIPOLYGON',
id='geo_values',
),
],
)
def test_load_table_columnar(self, con, tmp_table, df, table_fields):
con.execute("create table {} ({});".format(tmp_table, table_fields))
con.load_table_columnar(tmp_table, df)
result = _cursor2df(con.execute('select * from {}'.format(tmp_table)))
pd.testing.assert_frame_equal(df, result)
def test_load_infer(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
data = pd.DataFrame(
{
'a': np.array([0, 1], dtype=np.int32),
'b': np.array([1.1, 2.2], dtype=np.float32),
'c': ['a', 'b'],
}
)
con.load_table("baz", data)
con.execute("drop table if exists baz;")
def test_load_infer_bad(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
with pytest.raises(TypeError):
con.load_table("baz", [], method='thing')
con.execute("drop table if exists baz;")
def test_infer_non_pandas(self, con):
con.execute("drop table if exists baz;")
con.execute("create table baz (a int, b float, c text);")
with pytest.raises(TypeError):
con.load_table("baz", [], method='columnar')
con.execute("drop table if exists baz;")
def test_load_columnar_pandas_all(self, con):
c = con.cursor()
c.execute('drop table if exists all_types;')
create = textwrap.dedent(
'''\
create table all_types (
boolean_ BOOLEAN,
smallint_ SMALLINT,
int_ INT,
bigint_ BIGINT,
float_ FLOAT,
double_ DOUBLE,
varchar_ VARCHAR(40),
text_ TEXT,
time_ TIME,
timestamp_ TIMESTAMP,
date_ DATE
);'''
)
# skipping decimal for now
c.execute(create)
data = pd.DataFrame(
{
"boolean_": [True, False, True, False],
"smallint_": np.array([0, 1, 0, 1], dtype=np.int16),
"int_": np.array([0, 1, 0, 1], dtype=np.int32),
"bigint_": np.array([0, 1, 0, 1], dtype=np.int64),
"float_": np.array([0, 1, 0, 1], dtype=np.float32),
"double_": np.array([0, 1, 0, 1], dtype=np.float64),
"varchar_": ["a", "b", "a", "b"],
"text_": ['a', 'b', 'a', 'b'],
"time_": [
datetime.time(0, 11, 59),
datetime.time(13),
datetime.time(22, 58, 59),
datetime.time(7, 13, 43),
],
"timestamp_": [
pd.Timestamp("2016"),
pd.Timestamp("2017"),
pd.Timestamp(
'2017-11-28 23:55:59.342380', tz='US/Eastern'
),
pd.Timestamp(
'2018-11-28 23:55:59.342380', tz='Asia/Calcutta'
),
],
"date_": [
datetime.date(2016, 1, 1),
datetime.date(2017, 1, 1),
datetime.date(2017, 11, 28),
datetime.date(2018, 11, 28),
],
},
columns=[
'boolean_',
'smallint_',
'int_',
'bigint_',
'float_',
'double_',
'varchar_',
'text_',
'time_',
'timestamp_',
'date_',
],
)
con.load_table_columnar("all_types", data, preserve_index=False)
result = list(c.execute("select * from all_types"))
expected = [
(
1,
0,
0,
0,
0.0,
0.0,
'a',
'a',
datetime.time(0, 11, 59),
datetime.datetime(2016, 1, 1, 0, 0),
datetime.date(2016, 1, 1),
),
(
0,
1,
1,
1,
1.0,
1.0,
'b',
'b',
datetime.time(13, 0),
datetime.datetime(2017, 1, 1, 0, 0),
datetime.date(2017, 1, 1),
),
(
1,
0,
0,
0,
0.0,
0.0,
'a',
'a',
datetime.time(22, 58, 59),
datetime.datetime(2017, 11, 29, 4, 55, 59),
datetime.date(2017, 11, 28),
),
(
0,
1,
1,
1,
1.0,
1.0,
'b',
'b',
datetime.time(7, 13, 43),
datetime.datetime(2018, 11, 28, 18, 25, 59),
datetime.date(2018, 11, 28),
),
]
assert result == expected
c.execute('drop table if exists all_types;')
def test_load_table_columnar_arrow_all(self, con):
c = con.cursor()
c.execute('drop table if exists all_types;')
create = textwrap.dedent(
'''\
create table all_types (
boolean_ BOOLEAN,
smallint_ SMALLINT,
int_ INT,
bigint_ BIGINT,
float_ FLOAT,
double_ DOUBLE,
varchar_ VARCHAR(40),
text_ TEXT,
time_ TIME,
timestamp_ TIMESTAMP,
date_ DATE
);'''
)
# skipping decimal for now
c.execute(create)
names = [
'boolean_',
'smallint_',
'int_',
'bigint_',
'float_',
'double_',
'varchar_',
'text_',
'time_',
'timestamp_',
'date_',
]
columns = [
pa.array([True, False, None], type=pa.bool_()),
pa.array([1, 0, None]).cast(pa.int16()),
pa.array([1, 0, None]).cast(pa.int32()),
pa.array([1, 0, None]),
pa.array([1.0, 1.1, None]).cast(pa.float32()),
pa.array([1.0, 1.1, None]),
# no fixed-width string
pa.array(['a', 'b', None]),
pa.array(['a', 'b', None]),
(pa.array([1, 2, None]).cast(pa.int32()).cast(pa.time32('s'))),
pa.array(
[
datetime.datetime(2016, 1, 1, 12, 12, 12),
datetime.datetime(2017, 1, 1),
None,
]
),
pa.array(
[datetime.date(2016, 1, 1), datetime.date(2017, 1, 1), None]
),
]
table = pa.Table.from_arrays(columns, names=names)
con.load_table_arrow("all_types", table)
c.execute('drop table if exists all_types;')
def test_select_null(self, con):
con.execute("drop table if exists pymapd_test_table;")
con.execute("create table pymapd_test_table (a int);")
con.execute("insert into pymapd_test_table VALUES (1);")
con.execute("insert into pymapd_test_table VALUES (null);")
# the test
c = con.cursor()
result = c.execute("select * from pymapd_test_table")
expected = [(1,), (None,)]
assert result.fetchall() == expected
# cleanup
con.execute("drop table if exists pymapd_test_table;")
@pytest.mark.parametrize(
'df, expected',
[
(
pd.DataFrame(
{
"a": [1, 2],
"b": [1.0, 2.0],
"c": [
datetime.date(2016, 1, 1),
datetime.date(2017, 1, 1),
],
"d": [
np.datetime64("2010-01-01T01:01:01.001001001"),
np.datetime64("2011-01-01T01:01:01.001001001"),
],
}
),
{
'a': {'type': 'BIGINT', 'is_array': False},
'b': {'type': 'DOUBLE', 'is_array': False},
'c': {'type': 'DATE', 'is_array': False},
'd': {
'type': 'TIMESTAMP',
'is_array': False,
'precision': 9,
},
},
),
(
pd.DataFrame(
{
'a': [[1, 2], [1, 2], None, []],
'b': ['A', 'B', 'C', 'D'],
'c': [[1.0, 2.2], [1.0, 2.2], [], None],
'd': [
[
9007199254740991,
9007199254740992,
9007199254740993,
],
[],
None,
[
9007199254740994,
9007199254740995,
9007199254740996,
],
],
}
),
{
'a': {'type': 'BIGINT', 'is_array': True},
'b': {'type': 'STR', 'is_array': False},
'c': {'type': 'DOUBLE', 'is_array': True},
'd': {'type': 'BIGINT', 'is_array': True},
},
),
(
gpd.GeoDataFrame(
{
'a': [Point(0, 0), Point(1, 1)],
'b': [
LineString([(2, 0), (2, 4), (3, 4)]),
LineString([(0, 0), (1, 1)]),
],
'c': [
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
Polygon([(0, 0), (4, 0), (4, 4), (0, 4), (0, 0)]),
],
'd': [
MultiPolygon(
[
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
Polygon(
[
(0, 0),
(4, 0),
(4, 4),
(0, 4),
(0, 0),
]
),
]
),
MultiPolygon(
[
Polygon(
[
(0, 0),
(4, 0),
(4, 4),
(0, 4),
(0, 0),
]
),
Polygon([(0, 0), (1, 0), (0, 1), (0, 0)]),
]
),
],
}
),
{
'a': {'type': 'POINT', 'is_array': True},
'b': {'type': 'LINESTRING', 'is_array': True},
'c': {'type': 'POLYGON', 'is_array': True},
'd': {'type': 'MULTIPOLYGON', 'is_array': True},
},
),
],
)
def test_create_table(self, con, tmp_table, df, expected):
con.create_table(tmp_table, df)
for col in con.get_table_details(tmp_table):
assert expected[col.name]['type'] == col.type
if 'precision' in expected[col.name]:
assert expected[col.name]['precision'] == col.precision
def test_load_table_creates(self, con):
data = pd.DataFrame(
{
"boolean_": [True, False],
"smallint_cast": np.array([0, 1], dtype=np.int8),
"smallint_": np.array([0, 1], dtype=np.int16),
"int_": np.array([0, 1], dtype=np.int32),
"bigint_": np.array([0, 1], dtype=np.int64),
"float_": np.array([0, 1], dtype=np.float32),
"double_": np.array([0, 1], dtype=np.float64),
"varchar_": ["a", "b"],
"text_": ['a', 'b'],
"time_": [datetime.time(0, 11, 59), datetime.time(13)],
"timestamp1_": [pd.Timestamp("2016"), pd.Timestamp("2017")],
"timestamp2_": [
np.datetime64("2010-01-01T01:01:01.001001001"),
np.datetime64("2011-01-01T01:01:01.001001001"),
],
"date_": [
datetime.date(2016, 1, 1),
datetime.date(2017, 1, 1),
],
},
columns=[
'boolean_',
'smallint_',
'int_',
'bigint_',
'float_',
'double_',
'varchar_',
'text_',
'time_',
'timestamp1_',
'timestamp2_',
'date_',
],
)
con.execute("drop table if exists test_load_table_creates;")
con.load_table("test_load_table_creates", data, create=True)
con.execute("drop table if exists test_load_table_creates;")
def test_array_in_result_set(self, con):
# text
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 TEXT[]);"
)
row = [
("row1", "{hello,goodbye,aloha}"),
("row2", "{hello2,goodbye2,aloha2}"),
]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [
('row1', ['hello', 'goodbye', 'aloha']),
('row2', ['hello2', 'goodbye2', 'aloha2']),
]
assert ans == expected
# int
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 INT[]);"
)
row = [("row1", "{10,20,30}"), ("row2", "{40,50,60}")]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [('row1', [10, 20, 30]), ('row2', [40, 50, 60])]
assert ans == expected
# timestamp
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 TIMESTAMP[], col3 TIMESTAMP(9));"
)
row = [
(
"row1",
"{2019-03-02 00:00:00,2019-03-02 00:00:00,2019-03-02 00:00:00}", # noqa
"2010-01-01T01:01:01.001001001",
),
(
"row2",
"{2019-03-02 00:00:00,2019-03-02 00:00:00,2019-03-02 00:00:00}", # noqa
"2011-01-01T01:01:01.001001001",
),
]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [
(
'row1',
[
datetime.datetime(2019, 3, 2, 0, 0),
datetime.datetime(2019, 3, 2, 0, 0),
datetime.datetime(2019, 3, 2, 0, 0),
],
np.datetime64("2010-01-01T01:01:01.001001001"),
),
(
'row2',
[
datetime.datetime(2019, 3, 2, 0, 0),
datetime.datetime(2019, 3, 2, 0, 0),
datetime.datetime(2019, 3, 2, 0, 0),
],
np.datetime64("2011-01-01T01:01:01.001001001"),
),
]
assert ans == expected
# date
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 DATE[]);"
)
row = [
("row1", "{2019-03-02,2019-03-02,2019-03-02}"),
("row2", "{2019-03-02,2019-03-02,2019-03-02}"),
]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [
(
'row1',
[
datetime.date(2019, 3, 2),
datetime.date(2019, 3, 2),
datetime.date(2019, 3, 2),
],
),
(
'row2',
[
datetime.date(2019, 3, 2),
datetime.date(2019, 3, 2),
datetime.date(2019, 3, 2),
],
),
]
assert ans == expected
# time
con.execute("DROP TABLE IF EXISTS test_lists;")
con.execute(
"CREATE TABLE IF NOT EXISTS test_lists \
(col1 TEXT, col2 TIME[]);"
)
row = [
("row1", "{23:59:00,23:59:00,23:59:00}"),
("row2", "{23:59:00,23:59:00,23:59:00}"),
]
con.load_table_rowwise("test_lists", row)
ans = con.execute("select * from test_lists").fetchall()
expected = [
(
'row1',
[
datetime.time(23, 59),
datetime.time(23, 59),
datetime.time(23, 59),
],
),
(
'row2',
[
datetime.time(23, 59),
datetime.time(23, 59),
datetime.time(23, 59),
],
),
]
assert ans == expected
con.execute("DROP TABLE IF EXISTS test_lists;")
def test_upload_pandas_categorical_ipc(self, con):
con.execute("DROP TABLE IF EXISTS test_categorical;")
df = pd.DataFrame({"A": ["a", "b", "c", "a"]})
df["B"] = df["A"].astype('category')
# test that table created correctly when it doesn't exist on server
con.load_table("test_categorical", df)
ans = con.execute("select * from test_categorical").fetchall()
assert ans == [('a', 'a'), ('b', 'b'), ('c', 'c'), ('a', 'a')]
assert con.get_table_details("test_categorical") == [
ColumnDetails(
name='A',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
ColumnDetails(
name='B',
type='STR',
nullable=True,
precision=0,
scale=0,
comp_param=32,
encoding='DICT',
is_array=False,
),
]
# load row-wise
con.load_table("test_categorical", df, method="rows")
# load columnar
con.load_table("test_categorical", df, method="columnar")
# load arrow
con.load_table("test_categorical", df, method="arrow")
# test end result
df_ipc = con.select_ipc("select * from test_categorical")
assert df_ipc.shape == (16, 2)
        res = pd.concat([df, df, df, df]).reset_index(drop=True)
res["A"] = res["A"].astype('category')
res["B"] = res["B"].astype('category')
assert pd.DataFrame.equals(df_ipc, res)
# test that input df wasn't mutated
# original input is object, categorical
# to load via Arrow, converted internally to object, object
assert | is_object_dtype(df["A"]) | pandas.api.types.is_object_dtype |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# coding: utf-8
# # TorchArrow in 10 minutes
#
# TorchArrow is a torch.Tensor-like Python DataFrame library for data preprocessing in deep learning. It supports multiple execution runtimes and Arrow as a common memory format.
#
# (Remark. In case the following looks familiar, it is with gratitude that portions of this tutorial were borrowed and adapted from the 10 Minutes to Pandas (and CuDF) tutorial.)
#
#
# The TorchArrow library consists of 3 parts:
#
# * *DTypes* define *Schema*, *Fields*, primitive and composite *Types*.
# * *Columns* define sequences of strongly typed data with vectorized operations.
# * *Dataframes* are sequences of named and typed columns of the same length with relational operations.
#
# Let's get started...
# In[1]:
# ## Constructing data: Columns
#
# ### From Pandas to TorchArrow
# To start, let's create a Pandas Series and a TorchArrow column and compare them:
# In[2]:
import pandas as pd
import torcharrow as ta
import torcharrow.dtypes as dt
| pd.Series([1, 2, None, 4]) | pandas.Series |
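# A minimal sketch of the comparison this cell sets up (assuming the public
# constructor ta.column; values mirror the Series above):
s = pd.Series([1, 2, None, 4])    # pandas promotes to float64 and stores NaN
t = ta.column([1, 2, None, 4])    # torcharrow keeps an int64 column with a null slot
print(s)
print(t)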
from glob import glob
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
from util import COUPLING_TYPES
def main():
predictions = []
for coupling_type in COUPLING_TYPES:
predictions.extend(process_coupling_type(coupling_type))
print('writing predictions')
predictions = pd.DataFrame(predictions)[['id', 'scalar_coupling_constant']]
predictions.to_csv('data/submission_features3.csv', index=False)
def load_partitions(name, coupling_type):
print('loading', name, coupling_type)
features = pd.concat(
[ | pd.read_pickle(path) | pandas.read_pickle |
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import enum
import functools
import itertools
from typing import Callable, Union
import dask.dataframe as dd
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
try:
import cudf
import cupy as cp
import dask_cudf
from cudf.core.column import as_column, build_column
from cudf.utils.dtypes import is_list_dtype, is_string_dtype
HAS_GPU = True
except ImportError:
HAS_GPU = False
cp = None
cudf = None
try:
# Dask >= 2021.5.1
from dask.dataframe.core import hash_object_dispatch
except ImportError:
# Dask < 2021.5.1
from dask.dataframe.utils import hash_object_dispatch
try:
import nvtx
annotate = nvtx.annotate
except ImportError:
# don't have nvtx installed - don't annotate our functions
def annotate(*args, **kwargs):
def inner1(func):
@functools.wraps(func)
def inner2(*args, **kwargs):
return func(*args, **kwargs)
return inner2
return inner1
if HAS_GPU:
DataFrameType = Union[pd.DataFrame, cudf.DataFrame]
SeriesType = Union[pd.Series, cudf.Series]
else:
DataFrameType = Union[pd.DataFrame]
SeriesType = Union[pd.Series]
class ExtData(enum.Enum):
"""Simple Enum to track external-data types"""
DATASET = 0
ARROW = 1
CUDF = 2
PANDAS = 3
DASK_CUDF = 4
DASK_PANDAS = 5
PARQUET = 6
CSV = 7
def get_lib():
return cudf if HAS_GPU else pd
def _is_dataframe_object(x):
# Simple check if object is a cudf or pandas
# DataFrame object
if not HAS_GPU:
return isinstance(x, pd.DataFrame)
return isinstance(x, (cudf.DataFrame, pd.DataFrame))
def _is_series_object(x):
# Simple check if object is a cudf or pandas
# Series object
if not HAS_GPU:
return isinstance(x, pd.Series)
return isinstance(x, (cudf.Series, pd.Series))
def _is_cpu_object(x):
# Simple check if object is a cudf or pandas
# DataFrame object
return isinstance(x, (pd.DataFrame, pd.Series))
def is_series_or_dataframe_object(maybe_series_or_df):
return _is_series_object(maybe_series_or_df) or _is_dataframe_object(maybe_series_or_df)
def _hex_to_int(s, dtype=None):
def _pd_convert_hex(x):
if pd.isnull(x):
return pd.NA
return int(x, 16)
if isinstance(s, pd.Series):
# Pandas Version
if s.dtype == "object":
s = s.apply(_pd_convert_hex)
return s.astype("Int64").astype(dtype or "Int32")
else:
# CuDF Version
if s.dtype == "object":
s = s.str.htoi()
return s.astype(dtype or np.int32)
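# Illustration of the pandas path of this dispatch (the cudf path uses
# Series.str.htoi instead); values shown are what the code above produces:
#
#   _hex_to_int(pd.Series(["ff", "0a", None]))           # -> Int32: [255, 10, <NA>]
#   _hex_to_int(pd.Series(["ff", "0a"]), dtype="int64")  # -> int64: [255, 10]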
def _random_state(seed, like_df=None):
"""Dispatch for numpy.random.RandomState"""
if not HAS_GPU or isinstance(like_df, (pd.DataFrame, pd.Series)):
return np.random.RandomState(seed)
else:
return cp.random.RandomState(seed)
def _arange(size, like_df=None, dtype=None):
"""Dispatch for numpy.arange"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.arange(size, dtype=dtype)
else:
return cp.arange(size, dtype=dtype)
def _array(x, like_df=None, dtype=None):
"""Dispatch for numpy.array"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.array(x, dtype=dtype)
else:
return cp.array(x, dtype=dtype)
def _zeros(size, like_df=None, dtype=None):
"""Dispatch for numpy.array"""
if not HAS_GPU or isinstance(like_df, (np.ndarray, pd.DataFrame, pd.Series)):
return np.zeros(size, dtype=dtype)
else:
return cp.zeros(size, dtype=dtype)
def _hash_series(s):
"""Row-wise Series hash"""
if not HAS_GPU or isinstance(s, pd.Series):
# Using pandas hashing, which does not produce the
# same result as cudf.Series.hash_values(). Do not
# expect hash-based data transformations to be the
        # same on CPU and GPU. TODO: Fix this (maybe use
# murmurhash3 manually on CPU).
return hash_object_dispatch(s).values
else:
if _is_list_dtype(s):
return s.list.leaves.hash_values()
else:
return s.hash_values()
def _natural_log(df):
"""Natural logarithm of all columns in a DataFrame"""
if isinstance(df, pd.DataFrame):
return pd.DataFrame(np.log(df.values), columns=df.columns, index=df.index)
else:
return df.log()
def _series_has_nulls(s):
"""Check if Series contains any null values"""
if isinstance(s, pd.Series):
return s.isnull().values.any()
else:
return s._column.has_nulls
def _is_list_dtype(ser):
"""Check if Series contains list elements"""
if not HAS_GPU or isinstance(ser, pd.Series):
if not len(ser): # pylint: disable=len-as-condition
return False
return pd.api.types.is_list_like(ser.values[0])
return is_list_dtype(ser)
def _is_string_dtype(obj):
if not HAS_GPU:
return pd.api.types.is_string_dtype(obj)
else:
return is_string_dtype(obj)
def _flatten_list_column(s):
"""Flatten elements of a list-based column"""
if isinstance(s, pd.Series):
return pd.DataFrame({s.name: itertools.chain(*s)})
else:
return cudf.DataFrame({s.name: s.list.leaves})
def _concat_columns(args: list):
"""Dispatch function to concatenate DataFrames with axis=1"""
if len(args) == 1:
return args[0]
else:
_lib = cudf if HAS_GPU and isinstance(args[0], cudf.DataFrame) else pd
return _lib.concat(
[a.reset_index(drop=True) for a in args],
axis=1,
)
def _read_parquet_dispatch(df: DataFrameType) -> Callable:
return _read_dispatch(df=df, fmt="parquet")
def _read_dispatch(df: DataFrameType = None, cpu=None, collection=False, fmt="parquet") -> Callable:
"""Return the necessary read_parquet function to generate
data of a specified type.
"""
if cpu or isinstance(df, pd.DataFrame) or not HAS_GPU:
_mod = dd if collection else pd
else:
_mod = dask_cudf if collection else cudf.io
_attr = "read_csv" if fmt == "csv" else "read_parquet"
return getattr(_mod, _attr)
def _parquet_writer_dispatch(df: DataFrameType, path=None, **kwargs):
"""Return the necessary ParquetWriter class to write
data of a specified type.
If `path` is specified, an initialized `ParquetWriter`
object will be returned. To do this, the pyarrow schema
will be inferred from df, and kwargs will be used for the
ParquetWriter-initialization call.
"""
_args = []
if isinstance(df, pd.DataFrame):
_cls = pq.ParquetWriter
if path:
_args.append(pa.Table.from_pandas(df, preserve_index=False).schema)
else:
_cls = cudf.io.parquet.ParquetWriter
if not path:
return _cls
ret = _cls(path, *_args, **kwargs)
if isinstance(df, pd.DataFrame):
ret.write_table = lambda df: _cls.write_table(
ret, pa.Table.from_pandas(df, preserve_index=False)
)
return ret
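# Usage sketch (file name illustrative): when `path` is given the writer comes
# back initialized, and on the pandas path write_table accepts a DataFrame:
#
#   writer = _parquet_writer_dispatch(df, path="part.0.parquet")
#   writer.write_table(df)
#   writer.close()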
def _encode_list_column(original, encoded, dtype=None):
"""Convert `encoded` to be a list column with the
same offsets as `original`
"""
if isinstance(original, pd.Series):
# Pandas version (not very efficient)
offset = 0
new_data = []
for val in original.values:
size = len(val)
new_data.append(np.array(encoded[offset : offset + size], dtype=dtype))
offset += size
return pd.Series(new_data)
else:
# CuDF version
encoded = as_column(encoded)
if dtype:
encoded = encoded.astype(dtype, copy=False)
list_dtype = cudf.core.dtypes.ListDtype(encoded.dtype if dtype is None else dtype)
return build_column(
None,
dtype=list_dtype,
size=original.size,
children=(original._column.offsets, encoded),
)
def _pull_apart_list(original):
values = _flatten_list_column(original)
if isinstance(original, pd.Series):
        offsets = pd.concat([pd.Series([0]), original.map(len).cumsum()])
else:
offsets = original._column.offsets
elements = original._column.elements
if isinstance(elements, cudf.core.column.lists.ListColumn):
offsets = elements.list(parent=original.list._parent)._column.offsets[offsets]
return values, offsets
def _to_arrow(x):
"""Move data to arrow format"""
if isinstance(x, pd.DataFrame):
return pa.Table.from_pandas(x, preserve_index=False)
else:
return x.to_arrow()
def _concat(objs, **kwargs):
if isinstance(objs[0], (pd.DataFrame, pd.Series)):
return pd.concat(objs, **kwargs)
else:
return cudf.core.reshape.concat(objs, **kwargs)
def _make_df(_like_df=None, device=None):
if not cudf or isinstance(_like_df, (pd.DataFrame, pd.Series)):
return pd.DataFrame(_like_df)
elif isinstance(_like_df, (cudf.DataFrame, cudf.Series)):
return cudf.DataFrame(_like_df)
elif isinstance(_like_df, dict) and len(_like_df) > 0:
is_pandas = all(isinstance(v, pd.Series) for v in _like_df.values())
return pd.DataFrame(_like_df) if is_pandas else cudf.DataFrame(_like_df)
if device == "cpu":
return pd.DataFrame(_like_df)
return cudf.DataFrame(_like_df)
def _add_to_series(series, to_add, prepend=True):
if isinstance(series, pd.Series):
series_to_add = | pd.Series(to_add) | pandas.Series |
# bsub -q short -W 4:00 -R "rusage[mem=50000]" -oo multiple_dot_lists.out -eo multiple_dot_lists.err 'python multiple_dot_lists.py'
# %matplotlib inline
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
mpl.style.use('seaborn-white')
import multiprocess as mp
import numpy as np
import pandas as pd
import bioframe
import cooltools
import cooler
#import bbi
from cooltools import snipping
import sys
def pileup_multiple_dot_lists(cool_file,dot_file_list, exp_cool,resolution,flank,anchor_dist,anchor_flank,pileup_name):
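    """
    Build pileups around the dot (loop) anchors in `dot_file_list` for the three
    coolers in `cool_file` (HiC-FA-DpnII, HiC-DSG-DpnII, MicroC-DSG-MNase),
    snipping `flank` bp windows at `resolution` bp bins and using the expected-value
    tables in `exp_cool`; panels are laid out on a shared GridSpec figure.
    Note: condition labels, colors and hg38 chromsizes are hardcoded.
    """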
i=0
filename1=cool_file[0].split("/")[-2].split("_hg38")[0]
filename2=cool_file[1].split("/")[-2].split("_hg38")[0]
filename3=cool_file[2].split("/")[-2].split("_hg38")[0]
cool = [filename1,filename2,filename3]
exp_cool = [exp_cool[0], exp_cool[1], exp_cool[2]]
conditions = ['HiC-FA-DpnII', 'HiC-DSG-DpnII','MicroC-DSG-MNase']
print(filename1)
print(filename2)
print(filename3)
resolution=resolution
flank = flank
#resolution=sys.argv[4]
hg38 = bioframe.fetch_chromsizes('hg38')
chromsizes = bioframe.fetch_chromsizes('hg38')
chromosomes = list(chromsizes.index)
binsize = resolution
cooler_paths = {
'HiC-FA-DpnII' : cool_file[0],
'HiC-DSG-DpnII' : cool_file[1],
'MicroC-DSG-MNase' : cool_file[2],
}
exp_paths = {
'HiC-FA-DpnII' : exp_cool[0],
'HiC-DSG-DpnII' : exp_cool[1],
'MicroC-DSG-MNase' : exp_cool[2],
}
long_names = {
'HiC-FA-DpnII': 'HiC-FA-DpnII',
'HiC-DSG-DpnII': 'HiC-DSG-DpnII',
'MicroC-DSG-MNase': 'MicroC-DSG-MNase',
}
pal = sns.color_palette('colorblind')
colors = {
filename1: pal[0],
filename2 : '#333333',
filename3: pal[2],
}
clrs = {
cond: cooler.Cooler(cooler_paths[cond]) for cond in conditions
}
anchor_dist = anchor_dist
anchor_flank = flank
# dot file list
gs = plt.GridSpec(nrows=len(conditions), ncols=len(dot_file_list) + 1)
plt.figure(figsize=(6 * len(conditions)+1, 7))
for dot_file in dot_file_list:
print(dot_file)
sites = pd.read_table(dot_file)
mid1=(sites['start1']+sites['end1'])/2
mid2=(sites['start2']+sites['end2'])/2
new_file=pd.DataFrame()
new_file = pd.concat([sites['chrom1'],mid1,sites['chrom2'],mid2],axis=1)
# "convergent" orientation of paired CTCF motifs
# sites = sites[(sites['strand1'] == '+') & (sites['strand2'] == '-')] ## not working
new_file.columns=['chrom1','mid1','chrom2','mid2']
print(len(new_file))
new_file.head()
supports = [(chrom, 0, chromsizes[chrom]) for chrom in chromosomes]
snippet_flank = flank
windows1 = snipping.make_bin_aligned_windows(
binsize,
new_file['chrom1'],
new_file['mid1'],
flank_bp=snippet_flank)
# windows1['strand'] = sites['strand1']
windows2 = snipping.make_bin_aligned_windows(
binsize,
new_file['chrom2'],
new_file['mid2'],
flank_bp=snippet_flank)
windows = | pd.merge(windows1, windows2, left_index=True, right_index=True, suffixes=('1', '2')) | pandas.merge |
import json
import numpy as np
import pytest
from pandas import DataFrame, Index, json_normalize
import pandas._testing as tm
from pandas.io.json._normalize import nested_to_record
@pytest.fixture
def deep_nested():
# deeply nested data
return [
{
"country": "USA",
"states": [
{
"name": "California",
"cities": [
{"name": "San Francisco", "pop": 12345},
{"name": "Los Angeles", "pop": 12346},
],
},
{
"name": "Ohio",
"cities": [
{"name": "Columbus", "pop": 1234},
{"name": "Cleveland", "pop": 1236},
],
},
],
},
{
"country": "Germany",
"states": [
{"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},
{
"name": "Nordrhein-Westfalen",
"cities": [
{"name": "Duesseldorf", "pop": 1238},
{"name": "Koeln", "pop": 1239},
],
},
],
},
]
@pytest.fixture
def state_data():
return [
{
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
"info": {"governor": "<NAME>"},
"shortname": "FL",
"state": "Florida",
},
{
"counties": [
{"name": "Summit", "population": 1234},
{"name": "Cuyahoga", "population": 1337},
],
"info": {"governor": "<NAME>"},
"shortname": "OH",
"state": "Ohio",
},
]
@pytest.fixture
def author_missing_data():
return [
{"info": None},
{
"info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},
"author_name": {"first": "Jane", "last_name": "Doe"},
},
]
@pytest.fixture
def missing_metadata():
return [
{
"name": "Alice",
"addresses": [
{
"number": 9562,
"street": "Morris St.",
"city": "Massillon",
"state": "OH",
"zip": 44646,
}
],
},
{
"addresses": [
{
"number": 8449,
"street": "Spring St.",
"city": "Elizabethton",
"state": "TN",
"zip": 37643,
}
]
},
]
@pytest.fixture
def max_level_test_input_data():
"""
input data to test json_normalize with max_level param
"""
return [
{
"CreatedBy": {"Name": "User001"},
"Lookup": {
"TextField": "Some text",
"UserField": {"Id": "ID001", "Name": "Name001"},
},
"Image": {"a": "b"},
}
]
class TestJSONNormalize:
def test_simple_records(self):
recs = [
{"a": 1, "b": 2, "c": 3},
{"a": 4, "b": 5, "c": 6},
{"a": 7, "b": 8, "c": 9},
{"a": 10, "b": 11, "c": 12},
]
result = json_normalize(recs)
expected = DataFrame(recs)
tm.assert_frame_equal(result, expected)
def test_simple_normalize(self, state_data):
result = json_normalize(state_data[0], "counties")
expected = DataFrame(state_data[0]["counties"])
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, "counties")
expected = []
for rec in state_data:
expected.extend(rec["counties"])
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
result = json_normalize(state_data, "counties", meta="state")
expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])
tm.assert_frame_equal(result, expected)
def test_empty_array(self):
result = json_normalize([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_simple_normalize_with_separator(self, deep_nested):
# GH 14883
result = json_normalize({"A": {"A": 1, "B": 2}})
expected = DataFrame([[1, 2]], columns=["A.A", "A.B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_")
expected = DataFrame([[1, 2]], columns=["A_A", "A_B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3")
expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"])
tm.assert_frame_equal(result.reindex_like(expected), expected)
result = json_normalize(
deep_nested,
["states", "cities"],
meta=["country", ["states", "name"]],
sep="_",
)
expected = Index(["name", "pop", "country", "states_name"]).sort_values()
assert result.columns.sort_values().equals(expected)
def test_value_array_record_prefix(self):
# GH 21536
result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.")
expected = DataFrame([[1], [2]], columns=["Prefix.0"])
tm.assert_frame_equal(result, expected)
def test_nested_object_record_path(self):
# GH 22706
data = {
"state": "Florida",
"info": {
"governor": "<NAME>",
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
},
}
result = json_normalize(data, record_path=["info", "counties"])
expected = DataFrame(
[["Dade", 12345], ["Broward", 40000], ["<NAME>", 60000]],
columns=["name", "population"],
)
tm.assert_frame_equal(result, expected)
def test_more_deeply_nested(self, deep_nested):
result = json_normalize(
deep_nested, ["states", "cities"], meta=["country", ["states", "name"]]
)
ex_data = {
"country": ["USA"] * 4 + ["Germany"] * 3,
"states.name": [
"California",
"California",
"Ohio",
"Ohio",
"Bayern",
"Nordrhein-Westfalen",
"Nordrhein-Westfalen",
],
"name": [
"<NAME>",
"Los Angeles",
"Columbus",
"Cleveland",
"Munich",
"Duesseldorf",
"Koeln",
],
"pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239],
}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_shallow_nested(self):
data = [
{
"state": "Florida",
"shortname": "FL",
"info": {"governor": "<NAME>"},
"counties": [
{"name": "Dade", "population": 12345},
{"name": "Broward", "population": 40000},
{"name": "<NAME>", "population": 60000},
],
},
{
"state": "Ohio",
"shortname": "OH",
"info": {"governor": "<NAME>"},
"counties": [
{"name": "Summit", "population": 1234},
{"name": "Cuyahoga", "population": 1337},
],
},
]
result = json_normalize(
data, "counties", ["state", "shortname", ["info", "governor"]]
)
ex_data = {
"name": ["Dade", "Broward", "<NAME>", "Summit", "Cuyahoga"],
"state": ["Florida"] * 3 + ["Ohio"] * 2,
"shortname": ["FL", "FL", "FL", "OH", "OH"],
"info.governor": ["<NAME>"] * 3 + ["<NAME>"] * 2,
"population": [12345, 40000, 60000, 1234, 1337],
}
expected = DataFrame(ex_data, columns=result.columns)
tm.assert_frame_equal(result, expected)
def test_meta_name_conflict(self):
data = [
{
"foo": "hello",
"bar": "there",
"data": [
{"foo": "something", "bar": "else"},
{"foo": "something2", "bar": "else2"},
],
}
]
msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix"
with pytest.raises(ValueError, match=msg):
json_normalize(data, "data", meta=["foo", "bar"])
result = json_normalize(data, "data", meta=["foo", "bar"], meta_prefix="meta")
for val in ["metafoo", "metabar", "foo", "bar"]:
assert val in result
def test_meta_parameter_not_modified(self):
# GH 18610
data = [
{
"foo": "hello",
"bar": "there",
"data": [
{"foo": "something", "bar": "else"},
{"foo": "something2", "bar": "else2"},
],
}
]
COLUMNS = ["foo", "bar"]
result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta")
assert COLUMNS == ["foo", "bar"]
for val in ["metafoo", "metabar", "foo", "bar"]:
assert val in result
def test_record_prefix(self, state_data):
result = | json_normalize(state_data[0], "counties") | pandas.io.json.json_normalize |
import birankpy
import pandas as pd
import sys
import numpy as np
from scipy import stats
import argparse
def read_data(filepath):
try:
data = pd.read_csv(filepath)
# print("loading data ")
    except Exception:
data = pd.read_csv(filepath,sep='\t')
# print("loading data ")
first_column = data.iloc[:, 0]
second_column = data.iloc[:, 1]
print("data columns\n",data.columns)
print("unique of first column:",data.columns[0],len(first_column.unique()),"unique of second column:",data.columns[1],len(second_column.unique()))
print()
return data
def topk_computing(ground_truth_user,ground_truth_tweet,user_birank_df,tweet_birank_df,result_values):
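    """
    Overlap of the top-k percent (args.topk) of the ground-truth and predicted
    rankings, computed separately for users and tweets; both overlap ratios are
    appended to result_values, which is returned.
    """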
user_number_top_20 = int(ground_truth_user.shape[0]*args.topk) # top k percent
# user_number_top_20=100 # top 50
groundtruth_user_top20 =ground_truth_user.iloc[0:user_number_top_20]['user'].to_list()
tweet_number_top_20 = int(ground_truth_tweet.shape[0]*args.topk) # top k percent
# tweet_number_top_20=100 # top 50
groundtruth_tweet_top20 =ground_truth_tweet.iloc[0:tweet_number_top_20]['tweet'].to_list()
predicted_user_top20 = user_birank_df.iloc[0:user_number_top_20]['user'].to_list()
predicted_tweet_top20 = tweet_birank_df.iloc[0:tweet_number_top_20]['tweet'].to_list()
# result_values=[]
common_value_user = set(groundtruth_user_top20).intersection(set(predicted_user_top20))
top20_accuracy_user = len(common_value_user)/user_number_top_20
result_values.append(top20_accuracy_user)
print("topk:{} accuracy user:".format(args.topk),top20_accuracy_user)
common_value_tweet = set(groundtruth_tweet_top20).intersection(set(predicted_tweet_top20))
top20_accuracy_tweet = len(common_value_tweet)/tweet_number_top_20
result_values.append(top20_accuracy_tweet)
print("topk:{} accuracy tweet:".format(args.topk),top20_accuracy_tweet)
return result_values
def calclulate_spearman(a,b,type,result_values):
corr, p= stats.spearmanr(a,b)
print('{} spearmanr coefficient:'.format(type),corr,p)
result_values.append((corr,p))
return result_values
def parse_args():
parser = argparse.ArgumentParser(description="Node clustering")
parser.add_argument('--dataset', type=str, help='dataset name')
parser.add_argument('--ut', type=str, default='', help='user item graph')
parser.add_argument('--uu', type=str, default='', help='user user graph')
parser.add_argument('--tt', type=str, default='', help='item item graph')
parser.add_argument('--gu', type=str, default='', help='user groundtruth ranking')
parser.add_argument('--gt', type=str, default='', help='item groundtruth ranking')
parser.add_argument('--topk', type=float, default=0.01, help='topk pecent of the dataset')
parser.add_argument('--model', type=str,default='proposed', help='ranking model')
parser.add_argument('--alpha', type=float, default=0.425, help='alpha')
parser.add_argument('--delta', type=float, default=0.425, help='delta')
parser.add_argument('--beta', type=float, default=0.425, help='beta')
parser.add_argument('--gamma', type=float, default=0.425, help='gamma')
parser.add_argument('--merge_tt', type=int, default=1, help='merge item-item graph.')
parser.add_argument('--sampling_uu',type=int, default=0,help='if sampling uu graph is required')
parser.add_argument('--sampling_tt',type=int, default=0,help='if sampling tt graph is required')
parser.add_argument('--verbose',type=int, default=0,help='print more information')
args, unknown = parser.parse_known_args()
return args
def save_results(args,results):
wp = open('./result_logs/{}_{}_{}'.format(args.model,args.dataset,args.merge_tt),'w')
wp.write("alpha: {} delta: {} beta: {} gamma: {} topk: {}\n".format(args.alpha,args.delta,args.beta,args.gamma,args.topk))
wp.write("topk: {1} recommendataion user: {0}\n".format(results[0],args.topk))
wp.write("topk: {1} recommendataion tweet: {0}\n".format(results[1],args.topk))
wp.write("spearmanr user corr and p: {}\n".format(results[2]))
wp.write("spearmanr tweet corr and p: {}\n".format(results[3]))
wp.write("\n")
wp.close()
if __name__ == '__main__':
args = parse_args()
if args.model not in ['proposed','HITS','CoHITS','BGRM','BiRank']:
print("model is not defined")
sys.exit()
args.ut = 'Ranking/{}/{}.ut'.format(args.dataset,args.dataset)
args.uu = 'Ranking/{}/{}.uu'.format(args.dataset,args.dataset)
args.tt = 'Ranking/{}/{}.tt'.format(args.dataset,args.dataset)
args.gu = 'Ranking/{}/{}.gt_user'.format(args.dataset,args.dataset)
args.gt = 'Ranking/{}/{}.gt_tweet'.format(args.dataset,args.dataset)
ut= read_data(args.ut)
uu = read_data(args.uu)
tt = read_data(args.tt)
ground_truth_user = read_data(args.gu)
ground_truth_user.sort_values('num_followers',ascending=False,inplace=True)
ground_truth_user['num_followers'] = ground_truth_user['num_followers']/sum(ground_truth_user['num_followers'])
print(ground_truth_user.head())
ground_truth_tweet = read_data(args.gt)
ground_truth_tweet.sort_values('num_favorites_retweets',ascending=False,inplace=True)
ground_truth_tweet['num_favorites_retweets']=ground_truth_tweet['num_favorites_retweets']/sum(ground_truth_tweet['num_favorites_retweets'])
print(ground_truth_tweet.head())
columns_ut = ut.columns
columns_uu = uu.columns
columns_tt = tt.columns
print("columns names in user-tweet data:",columns_ut)
print("columns names in user-user data:",columns_uu)
print("columns names in item-item data:",columns_tt)
print('user-item graph shape',ut.shape)
print('user-user graph shape',uu.shape)
print('item-item graph shape',tt.shape)
bn = birankpy.BipartiteNetwork()
if args.model =='proposed':
bn.set_edgelist_two_types(
ut,
uu,
tt,
top_col=columns_ut[0], bottom_col=columns_ut[1],
weight_col=None,
weight_col_2=None,
weight_col_3=None
)
user_birank_df, tweet_birank_df,iteration = bn.generate_birank_new(args)
user_birank_df.sort_values(by=bn.top_col+'_birank', ascending=False,inplace=True)
tweet_birank_df.sort_values(by=bn.bottom_col+'_birank', ascending=False,inplace=True)
print(user_birank_df.head(5))
print(tweet_birank_df.head(5))
else:
bn.set_edgelist(
ut,
top_col=columns_ut[0], bottom_col=columns_ut[1],
weight_col=None,
)
user_birank_df, tweet_birank_df = bn.generate_birank(normalizer=args.model)
user_birank_df.sort_values(by=bn.top_col+'_birank', ascending=False,inplace=True)
tweet_birank_df.sort_values(by=bn.bottom_col+'_birank', ascending=False,inplace=True)
print(user_birank_df.head(5))
print(tweet_birank_df.head(5))
result_values = []
topk_computing(ground_truth_user,ground_truth_tweet,user_birank_df,tweet_birank_df,result_values)
#merge ground truth and predicted
# user_merged = pd.merge(ground_truth_user[ground_truth_user['num_followers']>=0],user_birank_df,on='user')
# tweet_merged = pd.merge(ground_truth_tweet[ground_truth_tweet['num_favorites_retweets']>=0],tweet_birank_df,on='tweet')
user_merged = pd.merge(user_birank_df,ground_truth_user[ground_truth_user['num_followers']>=0],on='user')
tweet_merged = | pd.merge(tweet_birank_df,ground_truth_tweet[ground_truth_tweet['num_favorites_retweets']>=0],on='tweet') | pandas.merge |
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import ShuffleSplit, cross_validate
def crossvalidate_pipeline_scores(X, y, pipelines, n_splits, random_state):
"""crossvalidates all pipelines in the provided dictionary and returns scores (R2, neg-MAE, neg-MRE)
:param X: Dataframe with the predictors
:type X: dict
:param y: Pandas series with the target values
:type y: series
:param pipelines: dictionary with the name of the model as key and pipeline as value
:type pipelines: dict
:param n_splits: how many splits to do in crossvalidation
:type n_splits: int
:param random_state: random state for splitting
:type random_state: int
:return: Dataframe with scores calculated for each fold and model
:rtype: dataframe
"""
cv = ShuffleSplit(n_splits=n_splits, random_state=random_state)
scores = {}
for modelname, pipeline in pipelines.items():
print("Crossvalidating", modelname)
score = cross_validate(
pipeline,
X,
y,
cv=cv,
scoring=("r2", "neg_mean_absolute_error", "neg_mean_squared_error"),
)
scores.update({modelname: score})
# opening the nested dictionary to a dataframe
scores = pd.concat({k: pd.DataFrame(v).T for k, v in scores.items()}, axis=0)
scores.index.names = "model", "metric"
scores.reset_index(inplace=True)
scores = pd.melt(scores, id_vars=["model", "metric"], var_name="fold")
scores = scores.assign(fold=scores.fold + 1)
return scores
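# Usage sketch (pipeline contents illustrative; any dict of sklearn Pipelines
# works), with the result handed to plot_scores defined below:
#
#   from sklearn.pipeline import Pipeline
#   from sklearn.preprocessing import StandardScaler
#   from sklearn.linear_model import LinearRegression
#
#   pipelines = {"linreg": Pipeline([("scale", StandardScaler()),
#                                    ("model", LinearRegression())])}
#   scores = crossvalidate_pipeline_scores(X, y, pipelines, n_splits=5, random_state=42)
#   plot_scores(scores, save=True, plotname="baseline")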
def plot_scores(scores, show_costs=False, save=False, plotname=None):
"""Generates BoxPlots for all metrics
:param scores: Dataframe with columns model, metric, fold and value (output from crossvalidate_pipelines)
:type scores: dataframe
    :param show_costs: Plot the computation cost metrics
    :type show_costs: boolean
    :param save: Save created plots to reports/figures/
    :type save: boolean
    :param plotname: file-name prefix used when saving the plots
    :type plotname: str
"""
for metric in scores.metric.drop_duplicates():
if not show_costs:
if metric not in [
"test_r2",
"test_neg_mean_absolute_error",
"test_neg_mean_squared_error",
]:
continue
sns.boxplot(x="model", y="value", data=scores[scores.metric == metric])
plt.title(metric)
plt.tight_layout()
if save:
plt.savefig(
os.path.join("reports", "figures", plotname + "_" + metric + ".png")
)
plt.show()
def train_and_plot_prediction_metrics(X_train, y_train, X_test, y_test, pipelines):
"""Trains the pipelines with train data, predict test data with trained
models and the plots MAE, MSE and R2 metrics
:param X_train: Training data features
:type X_train: dataframe
:param y_train: Training data target
    :type y_train: array
    :param X_test: Test data features
    :type X_test: dataframe
    :param y_test: Test data target
    :type y_test: array
:param pipelines: dictionary with the name of the model as key and pipeline as value
:type pipelines: dict
"""
    scores = pd.DataFrame(columns=["Model", "MAE", "MSE", "R2"])
# @file riverlog_for_gis.py
# @brief riverlog-related library, shared with DevZone
# @author <EMAIL>
import requests
import json
import os
import pandas as pd
from datetime import timedelta, datetime,date
import time
from pandas.api.types import is_numeric_dtype
def url_get(filename,url,reload=False):
"""
get url to file
"""
#print("filename=%s,url=%s" %(filename,url))
if os.path.isfile(filename) and reload==False:
return
else:
r = requests.get(url, params = {})
open(filename, 'wb').write(r.content)
def load_json_local(filename,file_id):
"""
load json file and transfer to panda
hardcode: handle json with data in 'data'
"""
with open(filename, 'r') as json_file:
data_head = json.load(json_file)
if file_id=="elev-gridData":
data = data_head['data']['data']
else:
data = data_head['data']
if len(data)>0:
cols = data[0].keys()
else:
return None
out = []
for row in data:
item = []
for c in cols:
item.append(row.get(c, {}))
out.append(item)
return pd.DataFrame(out, columns=cols)
def api_to_csv(api_id,pars,reload=False):
"""
get api data and save to csv.
api_id: api_path with '-' as delimiter
pars = [], put parameters as string
"""
api_map={
"rain-dailySum":"https://riverlog.lass-net.org/rain/dailySum?year=%i",
"rain-10minSum":"https://riverlog.lass-net.org/rain/10minSum?date=%s",
"rain-station":"https://riverlog.lass-net.org/rain/station",
"rain-rainData":"https://riverlog.lass-net.org/rain/rainData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"waterLevel-station":"https://riverlog.lass-net.org/waterLevel/station",
"waterLevel-waterLevelData":"https://riverlog.lass-net.org/waterLevel/waterLevelData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"waterLevelDrain-station":"https://riverlog.lass-net.org/waterLevelDrain/station",
"waterLevelDrain-waterLevelDrainData":"https://riverlog.lass-net.org/waterLevelDrain/waterLevelDrainData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"waterLevelAgri-station":"https://riverlog.lass-net.org/waterLevelAgri/station",
"waterLevelAgri-waterLevelAgriData":"https://riverlog.lass-net.org/waterLevelAgri/waterLevelAgriData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"sewer-station":"https://riverlog.lass-net.org/sewer/station",
"sewer-sewerData":"https://riverlog.lass-net.org/sewer/sewerData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"tide-station":"https://riverlog.lass-net.org/tide/station",
"tide-tideData":"https://riverlog.lass-net.org/tide/tideData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"pump-station":"https://riverlog.lass-net.org/pump/station",
"pump-pumpData":"https://riverlog.lass-net.org/pump/pumpData?date=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"reservoir-info":"https://riverlog.lass-net.org/reservoir/info",
"reservoir-reservoirData":"https://riverlog.lass-net.org/reservoir/reservoirData?date=%s",
"flood-station":"https://riverlog.lass-net.org/flood/station",
"flood-floodData":"https://riverlog.lass-net.org/flood/floodData?date=%s",
"alert-alertData":"https://riverlog.lass-net.org/alert/alertData?date=%s",
"alert-alertStatistic":"https://riverlog.lass-net.org/alert/alertStatistic?year=%s",
"alert-typhoonData":"https://riverlog.lass-net.org/alert/typhoonData?date=%s", # date can change to year
"elev-gridData":"https://riverlog.lass-net.org/elev/gridData?level=%s&minLat=%s&maxLat=%s&minLng=%s&maxLng=%s",
"statistic-waterUseAgriculture":"https://riverlog.lass-net.org/statistic/waterUseAgriculture",
"statistic-waterUseCultivation":"https://riverlog.lass-net.org/statistic/waterUseCultivation",
"statistic-waterUseLivestock":"https://riverlog.lass-net.org/statistic/waterUseLivestock",
"statistic-waterUseLiving":"https://riverlog.lass-net.org/statistic/waterUseLiving",
"statistic-waterUseIndustry":"https://riverlog.lass-net.org/statistic/waterUseIndustry",
"statistic-waterUseOverview":"https://riverlog.lass-net.org/statistic/waterUseOverview",
"statistic-monthWaterUse":"https://riverlog.lass-net.org/statistic/monthWaterUse",
"statistic-reservoirUse":"https://riverlog.lass-net.org/statistic/reservoirUse",
"statistic-reservoirSiltation":"https://riverlog.lass-net.org/statistic/reservoirSiltation"
}
url_fmt = api_map[api_id]
if pars:
if len(pars)==1:
url = url_fmt %(pars[0])
filename_prefix = "output/%s_%s" %(api_id,pars[0])
elif api_id=="":
pass
else: #5 parameters
url = url_fmt %(pars[0],pars[1],pars[2],pars[3],pars[4])
filename_prefix = "output/%s_%s" %(api_id,pars[0])
else: #None
url = url_fmt
filename_prefix = "output/%s" %(api_id)
    cont = True
    while cont:
        filename = filename_prefix + ".json"
        try:
            url_get(filename, url, reload)
            df = load_json_local(filename, api_id)
            if df is None:
                print("%s doesn't have data" % (api_id))
                return None
            cont = False
        except Exception:
            print("Exception when processing %s, retrying after 60s" % (filename))
            if os.path.isfile(filename):
                os.remove(filename)
            time.sleep(60)
filename = filename_prefix + ".csv"
print("%s: %s saved, shape = %s" %(api_id,filename, str(df.shape)))
df.to_csv(filename)
return df
def proc_Sum(file_loc, file_src,file_row,file_geo):
"""
process dailySum from columns to rows, by area , add location geo info
1: merge Sum/Num to Avg
2. drop unused columns
3. columns to rows
4. merge geo info
"""
df = pd.read_csv(file_src)
df['central']=df['centralSum']/df['centralNum']
df['north']=df['northSum']/df['northNum']
df['south']=df['southSum']/df['southNum']
df1=df.drop(['centralSum', 'centralNum','northSum','northNum','southSum','southNum','Unnamed: 0'], axis=1)
df2 = (df1.set_index(["time"])
.stack()
.reset_index(name='Value')
.rename(columns={'level_1':'location'}))
df2.to_csv(file_row)
df_geo = pd.read_csv(file_loc)
df_merge = pd.merge(df2, df_geo, on='location')
df_final = df_merge.sort_values(by='time')
df_final.to_csv(file_geo)
return df_final
def minSum_range(start_str,end_str):
"""
get 10minSum by date range. merge to 1 CSV
"""
start_date = datetime.strptime(start_str, "%Y-%m-%d")
end_date = datetime.strptime(end_str, "%Y-%m-%d")
first_day = True
date_now = start_date
df_all = None
while True:
if date_now > end_date:
break
month = date_now.month
year = date_now.year
file_datestr = date_now.strftime("%Y-%m-%d")
df = api_to_csv("rain-10minSum",[file_datestr])
#df['datetime']="%s" %(date_now.strftime("%Y/%m/%d"))
if first_day:
df_all = df
first_day = False
else:
df_all = pd.concat([df_all,df])
date_now += timedelta(days=1)
filename = "output/%s_%s_%s.csv" %("rain-10minSum",start_str,end_str)
print("rain-10minSum saved %s, shape = %s" %(filename, str(df_all.shape)))
df_save = df_all.sort_values(by='time')
df_save.to_csv(filename,header=True,float_format="%.2f")
def api_to_csv_range(start_str,end_str,api_id,pars,sort_col_name):
"""
get api by date range. merge to 1 CSV
"""
start_date = datetime.strptime(start_str, "%Y-%m-%d")
end_date = datetime.strptime(end_str, "%Y-%m-%d")
first_day = True
date_now = start_date
df_all = None
while True:
if date_now > end_date:
break
month = date_now.month
year = date_now.year
file_datestr = date_now.strftime("%Y-%m-%d")
if pars is None:
df = api_to_csv(api_id,[file_datestr])
else:
real_pars = [file_datestr]
real_pars.extend(pars)
df = api_to_csv(api_id,real_pars)
#df['datetime']="%s" %(date_now.strftime("%Y/%m/%d"))
if first_day:
df_all = df
first_day = False
else:
df_all = pd.concat([df_all,df])
date_now += timedelta(days=1)
filename = "output/%s_%s_%s.csv" %(api_id,start_str,end_str)
print("%s saved %s, shape = %s" %(api_id,filename, str(df_all.shape)))
df_save = df_all.sort_values(by=sort_col_name)
df_save.to_csv(filename,header=True,float_format="%.2f")
return filename
def date_to_gmt8(date_str):
"""
    convert a UTC date string (e.g. 2021-05-31T16:00:00.000Z) to GMT+8
"""
#date object
date_obj = datetime.strptime(date_str, "%Y-%m-%dT%H:%M:%S.000Z")
#+8 hour
hours_added = timedelta(hours = 8)
date_gmt8 = date_obj + hours_added
#output format
date_ret = date_gmt8.strftime("%Y-%m-%d %H:%M:%S")
return date_ret
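# For example (a sketch of the expected behaviour):
#   date_to_gmt8("2021-05-31T16:00:00.000Z")  ->  "2021-06-01 00:00:00"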
def csv_add_gmt8(file_src,col_name,file_dest):
"""
    add a GMT+8 time column to the CSV by re-formatting one column
"""
df = pd.read_csv(file_src)
df[col_name + "GMT8"] = df[col_name].apply(date_to_gmt8)
df_save=df.drop(['Unnamed: 0'], axis=1)
df_save.to_csv(file_dest)
return df_save
case_id=2 # 0: first version, 1: reservoir data by date, 2: for notebook debug
if case_id==0:
if 1: #get each api to CSV
api_to_csv("rain-dailySum",[2020])
api_to_csv("rain-10minSum",["2020-09-01"])
api_to_csv("rain-station",None)
api_to_csv("rain-rainData",["2020-09-01","23","24","121","122"])
api_to_csv("waterLevel-station",None)
api_to_csv("waterLevel-waterLevelData",["2020-09-01","23","24","121","122"])
api_to_csv("waterLevelDrain-station",None)
api_to_csv("waterLevelDrain-waterLevelDrainData",["2019-12-03","23","24","120","122"])
api_to_csv("waterLevelAgri-station",None)
api_to_csv("waterLevelAgri-waterLevelAgriData",["2019-12-03","23","24","120","122"])
api_to_csv("sewer-station",None)
api_to_csv("sewer-sewerData",["2019-12-02","24","25","121","122"])
api_to_csv("tide-station",None)
api_to_csv("tide-tideData",["2020-09-01","23","24","121","122"])
api_to_csv("pump-station",None)
api_to_csv("pump-pumpData",["2019-12-03","25","26","121","122"])
api_to_csv("reservoir-info",None)
api_to_csv("reservoir-reservoirData",["2020-09-01"])
api_to_csv("flood-station",None)
api_to_csv("flood-floodData",["2020-09-01"])
api_to_csv("alert-alertData",["2020-09-01"])
api_to_csv("alert-alertStatistic",[2020])
api_to_csv("alert-typhoonData",["2020-09-01"])
api_to_csv("elev-gridData",["7","23","24","120","121"])
api_to_csv("statistic-waterUseAgriculture",None)
api_to_csv("statistic-waterUseCultivation",None)
api_to_csv("statistic-waterUseLivestock",None)
api_to_csv("statistic-waterUseLiving",None)
api_to_csv("statistic-waterUseIndustry",None)
api_to_csv("statistic-waterUseOverview",None)
api_to_csv("statistic-monthWaterUse",None)
api_to_csv("statistic-reservoirUse",None)
api_to_csv("statistic-reservoirSiltation",None)
if 1: #process rain-dailySum,10minSum , predefined 3 area geo definition: areaGeo.csv
api_to_csv("rain-dailySum",[2020])
proc_Sum("areaGeo.csv","output/rain-dailySum_2020.csv","output/rain-dailySum_2020_row.csv","output/rain-dailySum_2020_geo.csv")
minSum_range("2020-10-01","2020-10-05")
proc_Sum("areaGeo.csv","output/rain-10minSum_2020-10-01_2020-10-05.csv","output/rain-10minSum_2020-10-01_2020-10-05_row.csv","output/rain-10minSum_2020-10-01_2020-10-05_geo.csv")
def get_value_by_index(df,keyvalue, target_col):
"""
find df's column(key) = value, return value of target_col
keyvalue: col_name=value
"""
cols = keyvalue.split("=")
if len(cols)!=2:
return ""
keyvalue_key = cols[0]
keyvalue_value = cols[1]
if is_numeric_dtype(df[keyvalue_key]):
keyvalue_value=float(cols[1])
if not target_col in df.columns:
return ""
values = df[df[keyvalue_key]==keyvalue_value][target_col].values.tolist()
if len(values)>0:
value = values[0]
else:
value = ""
return value
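# Illustrative example, using the reservoir columns that appear later in this file:
#   get_value_by_index(df_info, "id=10405", "ReservoirName")
#   -> the ReservoirName of the row whose id equals 10405, or "" if there is no match.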
#----- Multi-reservoir storage-percentage analysis
def reservoir_load(bag,date_start, date_end, reservoir_list): #[10405,10201,10205]
df_info = api_to_csv("reservoir-info",None)
filename=api_to_csv_range(date_start,date_end,"reservoir-reservoirData",None,"ObservationTime")
dest_name="%s_GMT8.csv" %(filename[:-4])
df=csv_add_gmt8(filename,"ObservationTime", dest_name )
#handle info
df_info=df_info[df_info['Year']==105]
df_info.drop_duplicates(subset="id")
df_info["id"] = pd.to_numeric(df_info["id"])
#merge/filter
df2=df.merge(df_info, how='left', left_on='ReservoirIdentifier', right_on='id')
df2=df2.drop_duplicates(subset=["ObservationTime","ReservoirIdentifier"],keep='last')
df2=df2[df2['ReservoirIdentifier'].isin(reservoir_list)] #,20101,20201
#Calculate, Pivot
df2["ObservationTimeGMT8"] = pd.to_datetime(df2['ObservationTimeGMT8'])
df2['percent']=df2['EffectiveWaterStorageCapacity']/df2['EffectiveCapacity']*100
df2=df2[df2['percent']<=100]
df3 = df2.pivot(index='ObservationTimeGMT8', columns='ReservoirName', values='percent')
bag['reservoir-info']=df_info
bag['reservoir-reservoirData']=df2
bag['reservoir_pivot']=df3
def reservoir_plot(bag):
#plot
#%matplotlib notebook
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
myfont = FontProperties(fname=r'/Library/Fonts/Microsoft/SimSun.ttf')
df = bag['reservoir_pivot']
df.plot()
plt.title("多水庫2021庫容比例",fontproperties=myfont)
plt.legend(prop=myfont)
plt.xticks(fontname = 'SimSun',size=8)
plt.yticks(fontname = 'SimSun',size=8)
plt.xlabel('時間',fontproperties=myfont)
plt.ylabel('百分比',fontproperties=myfont)
    plt.show()
#----- Today's flooding
def flood_load(bag,date_str,limit=0):
    # load supplementary county/township info for the flood stations
df_info_縣市鄉鎮 = pd.read_csv("flood-station_縣市鄉鎮.csv")
#get data, process
df_info=api_to_csv("flood-station",None)
df_info=df_info.merge(df_info_縣市鄉鎮, how='left', left_on='_id', right_on='_id')
df_info
#date_str = date.today() # 2021-06-07
print("Today is %s" %(date_str))
df = api_to_csv("flood-floodData",[date_str])
df["timeGMT8"] = df['time'].apply(date_to_gmt8)
df["timeGMT8"] = pd.to_datetime(df['timeGMT8'])
df=df.merge(df_info_縣市鄉鎮, how='left', left_on='stationID', right_on='_id')
df=df.drop_duplicates(subset=["time","stationName"],keep='last')
df['stationName_city']=df['COUNTYNAME'] + '|' + df['TOWNNAME'] + '|' + df['stationName']
#filter, sort
    df=df[df['value']>=limit] # adjustable flood-depth threshold; raise it when there are many flood records so the output stays manageable
df.sort_values(by=['timeGMT8'])
bag['flood-station_縣市鄉鎮']=df_info_縣市鄉鎮
bag['flood-station']=df_info
bag['flood-floodData']=df
def flood_plot(bag,date_str):
#%matplotlib notebook
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
df = bag['flood-floodData']
myfont = FontProperties(fname=r'/Library/Fonts/Microsoft/SimSun.ttf')
df2 = df.pivot(index='timeGMT8', columns='stationName_city', values='value')
df2.plot(style='.-')
title = "今日 %s 淹水感測器淹水值" %(date_str)
plt.title(title,fontproperties=myfont)
plt.legend(prop=myfont)
plt.xticks(fontname = 'SimSun',size=8)
plt.yticks(fontname = 'SimSun',size=8)
plt.xlabel('時間',fontproperties=myfont)
plt.ylabel('公分',fontproperties=myfont)
fig = plt.gcf()
fig.set_size_inches(8.5, 4.5)
    plt.show()
# List of flooded stations
def flood_list(bag):
df = bag['flood-floodData']
ary = df['stationName_city'].unique()
for name in ary:
print(name)
#----- Rain-gauge stations
# List the rain stations in Hsinchu County (新竹縣)
def rain_station_view():
df_info = api_to_csv("rain-station",None)
filter_city = df_info['city']=='新竹縣'
df_info = df_info[filter_city]
return df_info
def rain_load(bag, date_str,limit=0,reload=False):
df_info = api_to_csv("rain-station",None)
#date_str = date.today() # 2021-06-07
print("Today is %s" %(date_str))
df=api_to_csv("rain-rainData",[date_str,"20","26","120","122"],reload)
df["timeGMT8"] = df['time'].apply(date_to_gmt8)
df["timeGMT8"] = pd.to_datetime(df['timeGMT8'])
df=df.merge(df_info, how='left', left_on='stationID', right_on='stationID')
df=df.drop_duplicates(subset=["timeGMT8","stationID"],keep='last')
df['stationName']=df['city'] + '|' + df['town'] + '|' + df['name'] + '|' + df['stationID']
#filter, sort
    df=df[df['now']>=limit] # adjustable rainfall threshold; raise it when there is a lot of data so the output stays manageable
df=df.sort_values(by=['timeGMT8','stationID'])
bag['rain-station']=df_info
bag['rain-rainData']=df
# Today's rainfall, pivoted
def rain_plot(bag,date_str, user_df=None):
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
if user_df is None:
df = bag['rain-rainData']
else:
df = user_df
myfont = FontProperties(fname=r'/Library/Fonts/Microsoft/SimSun.ttf')
df2 = df.pivot(index='timeGMT8', columns='stationName', values='now')
df2.plot(style='.-')
title = "今日 %s 雨量站值" %(date_str)
plt.title(title,fontproperties=myfont)
plt.legend(prop=myfont)
plt.xticks(fontname = 'SimSun',size=8)
plt.yticks(fontname = 'SimSun',size=8)
plt.xlabel('時間',fontproperties=myfont)
plt.ylabel('mm',fontproperties=myfont)
fig = plt.gcf()
fig.set_size_inches(8.5, 4.5)
    plt.show()
def rain_hourdiff(bag, time_set,station_city):
    # hourly rainfall
df_info = bag['rain-station']
df = bag['rain-rainData']
#df_info.head()
if station_city is None:
stations = None
else:
f1=df_info['city'].isin(station_city)
#df_info[f1].values.tolist()
#df_info[f1]['city'].unique()
stations = df_info[f1]['stationID'].tolist()
#print(stations)
#df.head()
#time_set=['2021-06-10 15:00:00','2021-06-10 16:00:00']
f_time=df['timeGMT8'].isin(time_set)
if stations is None:
df_f = df[f_time]
else:
f_station=df['stationID'].isin(stations)
df_f = df[f_station & f_time]
#df[f_station]
#df['city'].unique()
if len(df_f.index)>0:
#print(df_f)
df_f = df_f.drop_duplicates(['stationName','timeGMT8'])
df_pivot = df_f.pivot(index='stationName', columns='timeGMT8', values='now')
print("time_set=%s" %(time_set))
#print(df_pivot)
df_pivot['rain_1hour']=df_pivot[time_set[1]]-df_pivot[time_set[0]]
bag['rain-hourdiff']=df_pivot
return True
else:
print("no data!")
return False
def to_slot_10min(t_src):
#t_now = datetime.now()
#t_added = timedelta(minutes = 10)
#t_slot= t_src - t_added
slot_min=int(int(t_src.minute/10)*10)
#date_str="%i-%i-%i %i:%02i:00" %(t_src.year,t_src.month,t_src.day,t_src.hour,slot_min)
date_str="%i-%02i-%02i %02i:%02i:00" %(t_src.year,t_src.month,t_src.day,t_src.hour,slot_min)
return date_str
def get_2slot(t_src,hour):
#print("t_src=%s" %(t_src))
date_str = to_slot_10min(t_src)
date_obj = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
date_obj2 = date_obj + timedelta(hours = hour)
date_str2 = date_obj2.strftime("%Y-%m-%d %H:%M:%S")
return [date_str,date_str2]
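# Illustrative example of the two helpers above:
#   with t_src = datetime(2021, 6, 10, 15, 37) and hour = 1,
#   to_slot_10min(t_src) -> "2021-06-10 15:30:00"
#   get_2slot(t_src, 1)  -> ["2021-06-10 15:30:00", "2021-06-10 16:30:00"]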
def rain_alarm_hour(bag,station_city,limit):
    rain_load(bag, date.today(), True)
#time_set=['2021-06-10 15:00:00','2021-06-10 16:00:00']
time_now = datetime.now()
time_set = get_2slot(time_now-timedelta(minutes = 90),1)
#print(time_set)
#station_city=['新竹縣','新竹市']
    rain_hourdiff(bag, time_set, station_city)
    df_pivot = bag['rain-hourdiff']
if len(df_pivot.index)>0:
df_pivot=df_pivot[df_pivot['rain_1hour']>limit]
df_pivot=df_pivot.sort_values(by=['rain_1hour'],ascending=False)
print("-----\nMonitor time: %s : %s 雨量站時雨量 > %i mm -----\n" %(time_now.strftime("%Y-%m-%d %H:%M:%S"), station_city,limit))
print(df_pivot)
else:
print("no data!")
def rain_day_max(bag,date_str,station_city):
rain_load(bag, date_str,True)
#station_city=['新竹縣','新竹市']
    df_info = bag['rain-station']
    df = bag['rain-rainData']
#df_info.head()
f1=df_info['city'].isin(station_city)
#df_info[f1].values.tolist()
#df_info[f1]['city'].unique()
stations = df_info[f1]['stationID'].tolist()
#f_time=df['timeGMT8'].isin(time_set)
f_station=df['stationID'].isin(stations)
df_f = df[f_station]
df_agg=df_f.groupby('stationName').agg({'now': ['max']})
bag['rain_day_max']=df_agg
def rain_load_range(bag,date_start, date_end, limit=0,reload=False):
df_info = api_to_csv("rain-station",None)
#date_str = date.today() # 2021-06-07
#print("Today is %s" %(date_str))
filename=api_to_csv_range(date_start,date_end,"rain-rainData",["20","26","120","122"],"time")
dest_name="%s_GMT8.csv" %(filename[:-4])
df=csv_add_gmt8(filename,"time", dest_name )
#df=api_to_csv_range("rain-rainData",[date_str,"20","26","120","122"],reload)
if 1:
#df["timeGMT8"] = df['time'].apply(date_to_gmt8)
df["timeGMT8"] = | pd.to_datetime(df['timeGMT8']) | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 11 20:08:48 2021
@author: jan_c
"""
import pandas as pd
from tkinter import *
from tkinter import filedialog
if __name__ == '__main__':
def frame():
def abrir_archivo():
global archivo
archivo = filedialog.askopenfilename(title="Abrir archivo .xlsx", initialdir="F:/", filetypes=(("Archivo .xlsx", "*.xlsx"), ("Archivo .xls", "*.xls")))
raiz.destroy()
raiz = Tk()
mi_frame = Frame(raiz, width=200, height=60)
mi_frame.pack()
boton = Button(raiz, text="Abrir archivo", command=abrir_archivo)
boton.pack(fill=X)
boton.config(cursor="hand2")
boton.config(bd=4)
boton.config(relief="groove")
raiz.mainloop()
return archivo
archivo = frame()
    # Read the input file
datos = pd.read_excel(archivo, sheet_name="Resumen de resultados", header=4)
    # Filter the columns of interest and build the ordered data
    filtro_fluorescencia = datos.filter(regex = "Fluorescencia") # filter columns
datos_f = pd.DataFrame(filtro_fluorescencia)
muestra = pd.DataFrame(datos["Muestra"])
    datos_gen = pd.concat([muestra, datos_f], axis=1) # concatenate the data
datos_generales = datos_gen.set_index("Muestra")
print(datos_generales)
    # Prepare the output file
    with pd.ExcelWriter(archivo[:-5] + "_salida" + ".xlsx") as writer:
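        # Assumed minimal completion so the block is runnable: write the processed
        # table to the output workbook (the sheet name is an assumption).
        datos_generales.to_excel(writer, sheet_name="Salida")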
from pandas.util.py3compat import StringIO
import unittest
import sqlite3
import sys
import numpy as np
import pandas.io.sql as sql
import pandas.util.testing as tm
from pandas import Series, Index
class TestSQLite(unittest.TestCase):
def setUp(self):
self.db = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
create_sql = sql.get_sqlite_schema(frame, 'test')
self.db.execute(create_sql)
cur = self.db.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = sql.format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.db.commit()
result = sql.read_frame("select * from test", con=self.db)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_sqlite_schema(frame, 'test')
self.db.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.ix[0]
sql.execute(ins, self.db, params=tuple(row))
self.db.commit()
result = sql.read_frame("select * from test", self.db)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
self.db.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.db)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
self.db.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
self.db.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.db)
finally:
sys.stdout = sys.__stdout__
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.write_frame(frame, name='test_table', con=self.db)
result = sql.read_frame("select * from test_table", self.db)
# HACK!
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(range(len(frame2))) + 10
sql.write_frame(frame2, name='test_table2', con=self.db)
result = sql.read_frame("select * from test_table2", self.db,
index_col='Idx')
expected = frame.copy()
expected.index = Index(range(len(frame2))) + 10
tm.assert_frame_equal(expected, result)
def test_tquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.db)
result = sql.tquery("select A from test_table", self.db)
expected = frame.A
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sqlite3.OperationalError, sql.tquery,
'select * from blah', con=self.db)
self.assertRaises(sqlite3.OperationalError, sql.tquery,
'select * from blah', con=self.db, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.db)
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
        self.assertEqual(sql.uquery(stmt, con=self.db), 1)  # uquery returns the number of rows affected
import pandas as pd
import urllib.request
import numpy as np
import shapefile
from datetime import datetime
from zipfile import ZipFile
import pandasql as ps
import requests
import json
import pkg_resources
def softmax(x):
if np.max(x) > 1:
e_x = np.exp(x/np.max(x))
else:
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
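# Illustrative example: for x = np.array([1.0, 2.0, 3.0]) the max is > 1, so the input
# is scaled by its maximum before exponentiating; the result is a probability vector
# that sums to 1, approximately [0.23, 0.32, 0.45].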
## The getProvinceBoundaryBox function gets the coordinate (bounding-box) details from the Mapbox API for Italy
## Parameter Needed - Province Name
def getProvinceBoundaryBox(provinceName):
Place_Details = requests.get(
'http://api.mapbox.com/geocoding/v5/mapbox.places/' + provinceName + '%20province%20Italy.json?access_token=<KEY>').json()[
'features']
for eachPlace in Place_Details:
try:
if eachPlace['context'][0]['text'] == 'Italy' or eachPlace['context'][1]['text'] == 'Italy':
getBbox = eachPlace['bbox']
except:
continue
return getBbox
# The function below automatically fetches USA COVID-19 patient data from the Harvard Dataverse and creates a time-series patient file with county-level population, along with a USA county file
## Parameter Needed - Target Directory to save the File
def fetch_us_patientdata(tgtdir):
url='https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/HIDLTK/7NWUDK'
urllib.request.urlretrieve(url,tgtdir+'/us_county_confirmed_cases.tab')
latest_data = pd.read_csv(tgtdir+'/us_county_confirmed_cases.tab',sep='\t')
allcols = list(latest_data.columns)
datecols = allcols[allcols.index('HHD10')+1:]
latest_data = latest_data[['COUNTY', 'NAME']+datecols]
datecolsmod=[datetime.strptime(i,'%m/%d/%Y').strftime('%Y%m%d') for i in datecols]
latest_data.columns = ['cfips', 'county']+datecolsmod
latest_data = latest_data.melt(id_vars=['cfips', 'county'], var_name='data_date', value_name='no_pat')
latest_data['county']=latest_data['county'].apply(lambda x : x.split(' County')[0])
url='https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/HIDLTK/OFVFPY'
urllib.request.urlretrieve(url,tgtdir+'/COUNTY_MAP.zip')
zip = ZipFile(tgtdir+'/COUNTY_MAP.zip')
zip.extractall(tgtdir)
sf = shapefile.Reader(tgtdir+"/CO_CARTO")
shape_df = pd.DataFrame()
shapes = sf.shapes()
records = sf.records()
for eachrec in range(len(records)):
eachRec = {}
shapebbbox = shapes[eachrec].bbox
shapelat = (shapebbbox[1] + shapebbbox[3]) / 2
shapelong = (shapebbbox[0] + shapebbbox[2]) / 2
eachRec['lat'] = [shapelat]
eachRec['long'] = [shapelong]
eachRec['county_fips'] = [records[eachrec][0]]
eachRec['county_name'] = [records[eachrec][1]]
eachRec['POP'] = [records[eachrec][10]]
eachRec['HHD'] = [records[eachrec][11]]
shape_df = shape_df.append(pd.DataFrame.from_dict(eachRec))
us_counties = shape_df
us_counties['county_name'] = us_counties['county_name'].apply(lambda x: x.split(' County')[0])
us_counties['county_fips'] = us_counties['county_fips'].apply(lambda x: int(x))
us_counties.columns = ['lat','long', 'cfips', 'county', 'pop', 'HHD']
full_data = pd.merge(latest_data, us_counties, on=['cfips', 'county'])
    if sum(full_data['no_pat']) != sum(latest_data['no_pat']):
        print("fetch failed")
        raise ValueError("patient totals changed after merging county data")
full_data['no_pat'] = full_data.groupby(['cfips'])['no_pat'].apply(lambda x: x.cummax())
full_data['new_pat'] = full_data.groupby(['lat','long'])['no_pat'].diff()
full_data = full_data.dropna()
    us_counties.to_csv(tgtdir+'/USA_counties.csv',index=False)
    full_data.to_csv(tgtdir+'/USA_covid_data_final.csv',index=False)
print(' USA Patient Data Created under Directory :'+tgtdir)
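# Usage sketch (assumes the target directory already exists):
#   fetch_us_patientdata("output")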
## The function below creates the China COVID-19 time-series patient file by absorbing data from the Harvard Dataverse, and also creates a county file along with population data by county/province
## Parameter Needed - Target Directory to save the File
def fetch_china_patientdata(tgtdir):
url = 'https://dataverse.harvard.edu/api/access/datafile/3781338?format=original&gbrecs=true'
urllib.request.urlretrieve(url, tgtdir+'/City_Confirmed_Map_China.csv')
latest_data = pd.read_csv(tgtdir+'/City_Confirmed_Map_China.csv')
latest_data = latest_data[
['GbCity', 'GbProv', 'City_EN', 'Prov_EN', 'N_C_0115', 'N_C_0116', 'N_C_0117', 'N_C_0118', 'N_C_0119',
'N_C_0120', 'N_C_0121', 'N_C_0122', 'N_C_0123', 'N_C_0124', 'N_C_0125', 'N_C_0126', 'N_C_0127', 'N_C_0128',
'N_C_0129', 'N_C_0130', 'N_C_0131', 'N_C_0201', 'N_C_0202', 'N_C_0203', 'N_C_0204', 'N_C_0205', 'N_C_0206',
'N_C_0207', 'N_C_0208', 'N_C_0209', 'N_C_0210', 'N_C_0211', 'N_C_0212', 'N_C_0213', 'N_C_0214', 'N_C_0215',
'N_C_0216', 'N_C_0217', 'N_C_0218', 'N_C_0219', 'N_C_0220', 'N_C_0221', 'N_C_0222', 'N_C_0223', 'N_C_0224',
'N_C_0225', 'N_C_0226', 'N_C_0227', 'N_C_0228', 'N_C_0229', 'N_C_0301', 'N_C_0302', 'N_C_0303', 'N_C_0304',
'N_C_0305', 'N_C_0306', 'N_C_0307', 'N_C_0308', 'N_C_0309', 'N_C_0310', 'N_C_0311', 'N_C_0312', 'N_C_0313',
'N_C_0314', 'N_C_0315', 'N_C_0316', 'N_C_0317', 'N_C_0318', 'T_C_0115', 'T_C_0116', 'T_C_0117', 'T_C_0118',
'T_C_0119', 'T_C_0120', 'T_C_0121', 'T_C_0122', 'T_C_0123', 'T_C_0124', 'T_C_0125', 'T_C_0126', 'T_C_0127',
'T_C_0128', 'T_C_0129', 'T_C_0130', 'T_C_0131', 'T_C_0201', 'T_C_0202', 'T_C_0203', 'T_C_0204', 'T_C_0205',
'T_C_0206', 'T_C_0207', 'T_C_0208', 'T_C_0209', 'T_C_0210', 'T_C_0211', 'T_C_0212', 'T_C_0213', 'T_C_0214',
'T_C_0215', 'T_C_0216', 'T_C_0217', 'T_C_0218', 'T_C_0219', 'T_C_0220', 'T_C_0221', 'T_C_0222', 'T_C_0223',
'T_C_0224', 'T_C_0225', 'T_C_0226', 'T_C_0227', 'T_C_0228', 'T_C_0229', 'T_C_0301', 'T_C_0302', 'T_C_0303',
'T_C_0304', 'T_C_0305', 'T_C_0306', 'T_C_0307', 'T_C_0308', 'T_C_0309', 'T_C_0310', 'T_C_0311', 'T_C_0312',
'T_C_0313', 'T_C_0314', 'T_C_0315', 'T_C_0316', 'T_C_0317', 'T_C_0318']]
latest_data['City_EN'] = latest_data['City_EN'].apply(lambda x: x.split('(')[0])
latest_data.columns = ['GbCity', 'GbProv', 'city', 'Province', 'N_C_0115', 'N_C_0116', 'N_C_0117', 'N_C_0118',
'N_C_0119', 'N_C_0120', 'N_C_0121', 'N_C_0122', 'N_C_0123', 'N_C_0124', 'N_C_0125',
'N_C_0126', 'N_C_0127', 'N_C_0128', 'N_C_0129', 'N_C_0130', 'N_C_0131', 'N_C_0201',
'N_C_0202', 'N_C_0203', 'N_C_0204', 'N_C_0205', 'N_C_0206', 'N_C_0207', 'N_C_0208',
'N_C_0209', 'N_C_0210', 'N_C_0211', 'N_C_0212', 'N_C_0213', 'N_C_0214', 'N_C_0215',
'N_C_0216', 'N_C_0217', 'N_C_0218', 'N_C_0219', 'N_C_0220', 'N_C_0221', 'N_C_0222',
'N_C_0223', 'N_C_0224', 'N_C_0225', 'N_C_0226', 'N_C_0227', 'N_C_0228', 'N_C_0229',
'N_C_0301', 'N_C_0302', 'N_C_0303', 'N_C_0304', 'N_C_0305', 'N_C_0306', 'N_C_0307',
'N_C_0308', 'N_C_0309', 'N_C_0310', 'N_C_0311', 'N_C_0312', 'N_C_0313', 'N_C_0314',
'N_C_0315', 'N_C_0316', 'N_C_0317', 'N_C_0318', 'T_C_0115', 'T_C_0116', 'T_C_0117',
'T_C_0118', 'T_C_0119', 'T_C_0120', 'T_C_0121', 'T_C_0122', 'T_C_0123', 'T_C_0124',
'T_C_0125', 'T_C_0126', 'T_C_0127', 'T_C_0128', 'T_C_0129', 'T_C_0130', 'T_C_0131',
'T_C_0201', 'T_C_0202', 'T_C_0203', 'T_C_0204', 'T_C_0205', 'T_C_0206', 'T_C_0207',
'T_C_0208', 'T_C_0209', 'T_C_0210', 'T_C_0211', 'T_C_0212', 'T_C_0213', 'T_C_0214',
'T_C_0215', 'T_C_0216', 'T_C_0217', 'T_C_0218', 'T_C_0219', 'T_C_0220', 'T_C_0221',
'T_C_0222', 'T_C_0223', 'T_C_0224', 'T_C_0225', 'T_C_0226', 'T_C_0227', 'T_C_0228',
'T_C_0229', 'T_C_0301', 'T_C_0302', 'T_C_0303', 'T_C_0304', 'T_C_0305', 'T_C_0306',
'T_C_0307', 'T_C_0308', 'T_C_0309', 'T_C_0310', 'T_C_0311', 'T_C_0312', 'T_C_0313',
'T_C_0314', 'T_C_0315', 'T_C_0316', 'T_C_0317', 'T_C_0318']
latest_data = latest_data.melt(id_vars=['GbCity', 'GbProv', 'city', 'Province'], var_name='Date',
value_name='No of Patient')
New_Patients = ps.sqldf(
''' select GbCity,GbProv,city,Province,Date,"No of Patient" from latest_data where Date like "N_C_%" ''',
locals())
New_Patients['Date'] = New_Patients['Date'].apply(lambda x: '2020' + x.split('N_C_')[1])
New_Patients.columns = ['GbCity', 'GbProv', 'city', 'Province', 'Date', 'New Patient Count']
Total_Patients = ps.sqldf(
''' select GbCity,GbProv,city,Province,Date,"No of Patient" from latest_data where Date like "T_C_%" ''',
locals())
Total_Patients['Date'] = Total_Patients['Date'].apply(lambda x: '2020' + x.split('T_C_')[1])
Total_Patients.columns = ['GbCity', 'GbProv', 'city', 'Province', 'Date', 'Total Patient Count']
latest_data_Normalized = pd.merge(New_Patients, Total_Patients, on=['GbCity', 'GbProv', 'city', 'Province', 'Date'])
latest_data_Normalized['GbCity'] = latest_data_Normalized['GbCity'].apply(lambda x: str(x))
latest_data_Normalized['GbProv'] = latest_data_Normalized['GbProv'].apply(lambda x: str(x))
url='https://dvn-cloud.s3.amazonaws.com/10.7910/DVN/MR5IJN/1710944b44b-ce6a2df0b32e?response-content-disposition=attachment%3B%20filename%2A%3DUTF-8%27%27china_city_basemap.zip&response-content-type=application%2Fzipped-shapefile&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200408T040239Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=AKIAIEJ3NV7UYCSRJC7A%2F20200408%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=ed0cbb34d3e1a129167cbd353afc469d13ddaf4dc14520366df279219b422957'
urllib.request.urlretrieve(url,tgtdir+'/china_city_basemap.zip')
zip = ZipFile(tgtdir+'/china_city_basemap.zip')
    zip.extractall(tgtdir)
sf = shapefile.Reader(tgtdir+"/china_city_basemap")
    shape_df = pd.DataFrame()
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
from meterstick import utils
import mock
import numpy as np
import pandas as pd
from pandas import testing
from scipy import stats
import unittest
class DistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
distribution = operations.Distribution('grp', sum_x)
def test_distribution(self):
output = self.distribution.compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_normalize(self):
output = operations.Normalize('grp', self.sum_x).compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
dist = operations.Distribution(['grp', 'platform'], sum_x)
output = dist.compute_on(df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 0.5, 0.25, 0.25],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_melted(self):
output = self.distribution.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.25, 0.75],
'grp': ['A', 'B'],
'Metric': ['Distribution of sum(X)', 'Distribution of sum(X)']
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby(self):
output = self.distribution.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_melted(self):
output = self.distribution.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'Metric': ['Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 1, 2, 3.5],
'grp': ['A', 'A', 'B', 'B'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.distribution.compute_on(df, ['grp0', 'country'])
bar = self.distribution.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.distribution.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
testing.assert_frame_equal(output, expected)
def test_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.Distribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Distribution of sum(X)': [0.25, 0.75],
'Distribution of count(X)': [0.5, 0.5]
},
index=['A', 'B'],
columns=['Distribution of sum(X)', 'Distribution of count(X)'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_where(self):
metric = operations.Distribution('grp', self.sum_x, where='country == "US"')
metric_no_filter = operations.Distribution('grp', self.sum_x)
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
def test_distribution_pipeline(self):
output = self.sum_x | operations.Distribution('grp') | metrics.compute_on(
self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.Distribution('grp', sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('grp').X.sum(), sum_x.get_cached(42, 'grp'))
self.assertTrue(metric.in_cache(42))
def test_distribution_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.Distribution('grp', sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_distribution_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'cookie': [1, 2, 1, 2]
})
sum_x = metrics.Sum('X')
m = operations.Distribution('grp', sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class CumulativeDistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['B', 'B', 'A', 'A'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
metric = operations.CumulativeDistribution('grp', sum_x)
def test_cumulative_distribution(self):
output = self.metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.75, 1.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
cum_dict = operations.CumulativeDistribution(['grp', 'platform'], sum_x)
output = cum_dict.compute_on(df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 0.5, 0.75, 1],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_melted(self):
output = self.metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.75, 1.],
'grp': ['A', 'B'],
'Metric': ['Cumulative Distribution of sum(X)'] * 2
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby(self):
output = self.metric.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 1. / 3, 1.],
'grp': ['A', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby_melted(self):
output = self.metric.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 1. / 3, 1.],
'grp': ['A', 'A', 'B'],
'Metric': ['Cumulative Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 2, 1.5, 3],
'grp': ['B', 'B', 'A', 'A'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.metric.compute_on(df, ['grp0', 'country'])
output.sort_index(level=['grp0', 'grp', 'country'], inplace=True)
bar = self.metric.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.metric.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected = expected.sort_index(level=['grp0', 'grp', 'country'])
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_order(self):
metric = operations.CumulativeDistribution('grp', self.sum_x, ('B', 'A'))
output = metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.25, 1.]},
index=['B', 'A'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_ascending(self):
metric = operations.CumulativeDistribution(
'grp', self.sum_x, ascending=False)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.25, 1.]},
index=['B', 'A'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_order_splitby(self):
metric = operations.CumulativeDistribution('grp', self.sum_x, ('B', 'A'))
output = metric.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 2. / 3, 1.],
'grp': ['A', 'B', 'A'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.CumulativeDistribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Cumulative Distribution of sum(X)': [0.75, 1.],
'Cumulative Distribution of count(X)': [0.5, 1.]
},
index=['A', 'B'],
columns=[
'Cumulative Distribution of sum(X)',
'Cumulative Distribution of count(X)'
])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_where(self):
metric = operations.CumulativeDistribution(
'grp', metrics.Count('X'), where='country == "US"')
metric_no_filter = operations.CumulativeDistribution(
'grp', metrics.Count('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_pipeline(self):
output = self.sum_x | operations.CumulativeDistribution(
'grp') | metrics.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.75, 1.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.CumulativeDistribution('grp', sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('grp').X.sum(), sum_x.get_cached(42, 'grp'))
self.assertTrue(metric.in_cache(42))
def test_cumulative_distribution_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.CumulativeDistribution('grp', sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_cumulative_distribution_with_jackknife_internal_caching_cleaned_up(
self):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['B', 'B', 'A', 'A'],
'country': ['US', 'US', 'US', 'EU'],
'cookie': [1, 2, 1, 2]
})
sum_x = metrics.Sum('X')
m = operations.CumulativeDistribution('grp', sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class PercentChangeTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
metric_lst = metrics.MetricList((metrics.Sum('X'), metrics.Count('X')))
def test_percent_change(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_include_baseline(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[0., 0.], [150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[0, 1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_melted(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [150., 0.],
'Metric': ['sum(X) Percent Change', 'count(X) Percent Change'],
'Condition': [1, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_melted_include_baseline(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0., 150., 0., 0.],
'Metric': [
'sum(X) Percent Change', 'sum(X) Percent Change',
'count(X) Percent Change', 'count(X) Percent Change'
],
'Condition': [0, 1, 0, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame(
{
'sum(X) Percent Change': [0., 100. / 3, 0., 200. / 3, np.nan],
'count(X) Percent Change': [0., -50., 0., 0., np.nan],
'Condition': [0, 1, 0, 1, 1],
'grp': ['A', 'A', 'B', 'B', 'C']
},
columns=[
'sum(X) Percent Change', 'count(X) Percent Change', 'Condition',
'grp'
])
expected.set_index(['grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby_melted(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [0., 100. / 3, 0., 200. / 3, np.nan, 0., -50., 0., 0., np.nan],
'Metric': ['sum(X) Percent Change'] * 5 +
['count(X) Percent Change'] * 5,
'Condition': [0, 1, 0, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'B', 'C'] * 2
})
expected.set_index(['Metric', 'grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6, 1.2, 2.2, 3.2, 4.2, 5.2, 6.5],
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'A', 'B', 'C'] * 2,
'grp0': ['foo'] * 6 + ['bar'] * 6
})
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(df, ['grp0', 'grp'])
bar = metric.compute_on(df[df.grp0 == 'bar'], 'grp')
foo = metric.compute_on(df[df.grp0 == 'foo'], 'grp')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected.sort_index(level=['grp0', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns(self):
df = self.df.copy()
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_include_baseline(self):
df = self.df.copy()
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_splitby(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_include_baseline_splitby(
self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_where(self):
metric = operations.PercentChange(
'Condition', 0, metrics.Sum('X'), where='grp == "A"')
metric_no_filter = operations.PercentChange('Condition', 0,
metrics.Sum('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'A'])
testing.assert_frame_equal(output, expected)
def test_percent_change_pipeline(self):
metric = operations.PercentChange('Condition', 0)
output = self.metric_lst | metric | metrics.compute_on(self.df)
expected = pd.DataFrame(
[[150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.PercentChange('Condition', 0, sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('Condition').X.sum(), sum_x.get_cached(42, 'Condition'))
self.assertTrue(metric.in_cache(42))
def test_percent_change_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.PercentChange('Condition', 0, sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_percent_change_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C'],
'cookie': [1, 2, 3] * 2
})
sum_x = metrics.Sum('X')
m = operations.PercentChange('Condition', 0, sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class AbsoluteChangeTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
metric_lst = metrics.MetricList((metrics.Sum('X'), metrics.Count('X')))
def test_absolute_change(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[9, 0]],
columns=['sum(X) Absolute Change', 'count(X) Absolute Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_absolute_change_include_baseline(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[0, 0], [9, 0]],
columns=['sum(X) Absolute Change', 'count(X) Absolute Change'],
index=[0, 1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_absolute_change_melted(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [9, 0],
'Metric': ['sum(X) Absolute Change', 'count(X) Absolute Change'],
'Condition': [1, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_melted_include_baseline(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0, 9, 0, 0],
'Metric': [
'sum(X) Absolute Change', 'sum(X) Absolute Change',
'count(X) Absolute Change', 'count(X) Absolute Change'
],
'Condition': [0, 1, 0, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_splitby(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame(
{
'sum(X) Absolute Change': [0., 1., 0., 2., np.nan],
'count(X) Absolute Change': [0., -1., 0., 0., np.nan],
'Condition': [0, 1, 0, 1, 1],
'grp': ['A', 'A', 'B', 'B', 'C']
},
columns=[
'sum(X) Absolute Change', 'count(X) Absolute Change', 'Condition',
'grp'
])
expected.set_index(['grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_splitby_melted(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [0., 1., 0., 2., np.nan, 0., -1., 0., 0., np.nan],
'Metric': ['sum(X) Absolute Change'] * 5 +
['count(X) Absolute Change'] * 5,
'Condition': [0, 1, 0, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'B', 'C'] * 2
})
expected.set_index(['Metric', 'grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6, 1.2, 2.2, 3.2, 4.2, 5.2, 6.5],
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'A', 'B', 'C'] * 2,
'grp0': ['foo'] * 6 + ['bar'] * 6
})
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(df, ['grp0', 'grp'])
bar = metric.compute_on(df[df.grp0 == 'bar'], 'grp')
foo = metric.compute_on(df[df.grp0 == 'foo'], 'grp')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected.sort_index(level=['grp0', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_absolute_change_multiple_condition_columns(self):
df = self.df.copy()
metric = operations.AbsoluteChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.AbsoluteChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_absolute_change_multiple_condition_columns_include_baseline(self):
df = self.df.copy()
metric = operations.AbsoluteChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.AbsoluteChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_absolute_change_multiple_condition_columns_splitby(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.AbsoluteChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.AbsoluteChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_absolute_change_multiple_condition_columns_include_baseline_splitby(
self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.AbsoluteChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.AbsoluteChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_absolute_change_where(self):
metric = operations.AbsoluteChange(
'Condition', 0, metrics.Sum('X'), where='grp == "A"')
metric_no_filter = operations.AbsoluteChange('Condition', 0,
metrics.Sum('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'A'])
testing.assert_frame_equal(output, expected)
def test_absolute_change_pipeline(self):
metric = operations.AbsoluteChange('Condition', 0)
output = self.metric_lst | metric | metrics.compute_on(self.df)
expected = pd.DataFrame(
[[9, 0]],
columns=['sum(X) Absolute Change', 'count(X) Absolute Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_absolute_change_internal_caching(self):
sum_x = metrics.Sum('X')
pct = operations.PercentChange('Condition', 0, sum_x)
ab = operations.AbsoluteChange('Condition', 0, sum_x)
metric = metrics.MetricList((pct, ab))
with mock.patch.object(
sum_x, 'compute_through', wraps=sum_x.compute_through) as mock_fn:
metric.compute_on(self.df)
mock_fn.assert_called_once()
def test_absolute_change_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.AbsoluteChange('Condition', 0, sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('Condition').X.sum(), sum_x.get_cached(42, 'Condition'))
self.assertTrue(metric.in_cache(42))
def test_absolute_change_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.AbsoluteChange('Condition', 0, sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_absolute_change_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C'],
'cookie': [1, 2, 3] * 2
})
sum_x = metrics.Sum('X')
m = operations.AbsoluteChange('Condition', 0, sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class MHTests(unittest.TestCase):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2],
'conversions': [1, 0, 1, 2, 1, 1],
'Id': [1, 2, 3, 1, 2, 3],
'Condition': [0, 0, 0, 1, 1, 1]
})
sum_click = metrics.Sum('clicks')
sum_conv = metrics.Sum('conversions')
cvr = metrics.Ratio('conversions', 'clicks', 'cvr')
metric_lst = metrics.MetricList((sum_conv / sum_click, cvr))
def test_mh(self):
metric = operations.MH('Condition', 0, 'Id', self.cvr)
output = metric.compute_on(self.df)
expected = pd.DataFrame([[40.]], columns=['cvr MH Ratio'], index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
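  # Worked check (illustrative, not part of the test suite): the expected value 40
  # above is the Mantel-Haenszel percent change obtained by pairing the Condition
  # 0/1 rows on 'Id'. Assuming the usual MH weighting, the arithmetic is:
  #
  #   base = df[df.Condition == 0].set_index('Id')    # clicks 1,3,2; conv 1,0,1
  #   treat = df[df.Condition == 1].set_index('Id')   # clicks 3,1,2; conv 2,1,1
  #   w = base.clicks + treat.clicks                  # 4, 4, 4
  #   numer = (treat.conversions * base.clicks / w).sum()   # 0.5 + 0.75 + 0.5 = 1.75
  #   denom = (base.conversions * treat.clicks / w).sum()   # 0.75 + 0.0 + 0.5 = 1.25
  #   100 * (numer / denom - 1)                              # = 40.0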
def test_mh_include_baseline(self):
metric = operations.MH('Condition', 0, 'Id', self.metric_lst, True)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[0., 0.], [40., 40.]],
columns=['sum(conversions) / sum(clicks) MH Ratio', 'cvr MH Ratio'],
index=[0, 1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_mh_melted(self):
metric = operations.MH('Condition', 0, 'Id', self.metric_lst)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [40., 40.],
'Metric': ['sum(conversions) / sum(clicks) MH Ratio', 'cvr MH Ratio'],
'Condition': [1, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_melted_include_baseline(self):
metric = operations.MH('Condition', 0, 'Id', self.metric_lst, True)
output = metric.compute_on(self.df, melted=True)
    expected = pd.DataFrame({
'Value': [0., 40., 0., 40.],
'Metric': [
'sum(conversions) / sum(clicks) MH Ratio',
'sum(conversions) / sum(clicks) MH Ratio', 'cvr MH Ratio',
'cvr MH Ratio'
],
'Condition': [0, 1, 0, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_splitby(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2] * 2,
'conversions': [1, 0, 1, 2, 1, 1, 1, 0, 1, 2, 1, 2],
'Id': [1, 2, 3, 1, 2, 3] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A'] * 6 + ['B'] * 6
})
metric = operations.MH('Condition', 0, 'Id', self.metric_lst)
output = metric.compute_on(df, 'grp')
output.sort_index(level='grp', inplace=True) # For Py2
expected = pd.DataFrame([['A', 1, 40., 40.], ['B', 1, 80., 80.]],
columns=[
'grp', 'Condition',
'sum(conversions) / sum(clicks) MH Ratio',
'cvr MH Ratio'
])
expected.set_index(['grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_splitby_melted(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2] * 2,
'conversions': [1, 0, 1, 2, 1, 1, 1, 0, 1, 2, 1, 2],
'Id': [1, 2, 3, 1, 2, 3] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A'] * 6 + ['B'] * 6
})
metric = operations.MH('Condition', 0, 'Id', self.metric_lst, True)
output = metric.compute_on(df, 'grp', melted=True)
output.sort_index(
level=['Metric', 'grp'], ascending=[False, True],
inplace=True) # For Py2
expected = pd.DataFrame({
'Value': [0., 40., 0., 80., 0., 40., 0., 80.],
'Metric': ['sum(conversions) / sum(clicks) MH Ratio'] * 4 +
['cvr MH Ratio'] * 4,
'Condition': [0, 1] * 4,
'grp': ['A', 'A', 'B', 'B'] * 2
})
expected.set_index(['Metric', 'grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_mh_multiple_condition_columns(self):
df = pd.DataFrame({
'clicks': [1, 3, 2, 3, 1, 2] * 2,
'conversions': [1, 0, 1, 2, 1, 1] * 2,
'Id': [1, 2, 3, 1, 2, 3] * 2,
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'B'] * 6,
})
metric = operations.MH(['Condition', 'grp'], (0, 'A'), 'Id',
self.metric_lst)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.MH('Condition_and_grp', (0, 'A'), 'Id',
self.metric_lst)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
| testing.assert_frame_equal(output, expected) | pandas.testing.assert_frame_equal |
from typing import Optional, Union
import numpy as np
import pandas as pd
from bokeh.io import output_notebook, reset_output
from bokeh.models import Legend, Dropdown, ColumnDataSource, CustomJS
from bokeh.plotting import figure, output_file, show
from bokeh.layouts import column
from bokeh.events import MenuItemClick
from wellcomeml.viz.palettes import (Wellcome33, WellcomeBackground, WellcomeNoData)
def visualize_clusters(clustering, filter_list: Optional[list] = None,
texts: Optional[list] = None,
radius: float = 0.05,
alpha: float = 0.5,
plot_width: int = 1000, plot_height: int = 530,
output_in_notebook: bool = True,
output_file_path: Optional[str] = None,
palette: Union[list, str] = 'Wellcome33'):
"""
This function creates a plot of the clusters
Args:
clustering: wellcomeml.ml.TextClustering instance
filter_list: list
texts: A list of texts to be displayed by the hover function
radius: float, default: 0.05
alpha: float, default: 0.5
        plot_width: int, default: 1000
        plot_height: int, default: 530
        output_in_notebook: bool, default: True
        output_file_path: str, optional, default: None
        palette: list or str, default: 'Wellcome33'
Returns:
None (Prints a bokeh figure)
"""
# Dataframe creation
reduced_points = clustering.reduced_points
data = | pd.DataFrame(reduced_points) | pandas.DataFrame |
# coding: utf-8
# ## Integrating LSTM model with Azure Machine Learning Package for Forecasting
#
# In this notebook, learn how to integrate LSTM model in the framework provided by Azure Machine Learning Package for Forecasting (AMLPF) to quickly build a forecasting model.
# We will use the Dow Jones dataset to build a model that forecasts quarterly revenue for the 30 Dow-Jones-listed companies.
#
# #### Disclaimer:
# This notebook is based on the ongoing development work as part of the future release of AMLPF. Therefore, please consider this as a preview of what might become available in future as part of AMLPF.
# Further, please note that this work has currently been tested only on Windows platform.
#
# ### Prerequisites:
# If you don't have an Azure subscription, create a free account before you begin. The following accounts and application must be set up and installed:<br/>
# * Azure Machine Learning Experimentation account.
#
# If these are not yet created or installed, follow the Azure Machine Learning Quickstart and Workbench installation article.
#
# In[18]:
import warnings
warnings.filterwarnings('ignore') # comment out this statement if you do not want to suppress the warnings.
import sys, os, inspect
import numpy as np
import pandas as pd
from datetime import datetime
import json
import requests
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import ftk
ftk_root_path = (ftk.__path__)[0] # This is the path where ftk package is installed.
from ftk.pipeline import AzureMLForecastPipeline
from ftk.operationalization.dnnscorecontext import DnnScoreContext
from ftk.operationalization.dnn_score_script_helper import score_run
from ftk.dnn_utils import create_lag_lead_features
from ftk.dnn_utils import pickle_keras_models
from keras.models import Model, Sequential
from keras.layers import Input, LSTM, Dense
from keras.models import load_model
print('imports done')
# In[2]:
np.random.seed(1000) # Set random seed for reproducibility.
# In[3]:
data_file_path = ftk_root_path + "\\data\\dow_jones\\dow_jones_data.tsv" # Change it depending upon where this file is stored.
num_lag_feats = 16 # Number of lag features to be used while training the model.
num_leads = 0 # Lead zero indicates current-time's value. forecast only one step at a time.
# Note: MAPE error computation is done considering num_leads = 0. It may need to be updated to take into account num_leads > 0. It has not been done yet.
num_test_records = 4 # Keep last four records for each company in the test data.
num_lstm_au = 50 # Number of units in single lstm layer.
num_epochs = 150 # Number of epochs to fit the model.
dj_series_freq = 'Q'
# In[4]:
# Read the dow_jones_data.
dj_df = pd.read_table(data_file_path)
print(dj_df.head())
print(dj_df.info())
# In[5]:
# Revenue has null values for some companies; 'V' has been identified as such a company.
# In this experiment, we remove that company from the dataset instead of interpolating.
dj_df = dj_df[dj_df['company_ticker'] != 'V']
# Convert quarter_start field to datetime.
dj_df['quarter_start'] = pd.to_datetime(dj_df['quarter_start'])
print(dj_df.info())
# In[6]:
# Group data by company to normalize it accordingly.
grouped_data = dj_df.groupby(by='company_ticker')
cmp_to_scaler = {}
norm_dj_df = pd.DataFrame(columns=dj_df.columns) # Dataframe with quarter_start, company_ticker, normalized-revenue information.
# In[7]:
# Normalize each company's data individually and save the scaler into a dictionary to be used later.
for grp_name, grp_data in grouped_data:
cur_grp_data = grp_data.sort_values(by=['quarter_start'])
cur_grp_data = cur_grp_data.drop(['company_ticker', 'quarter_start'], axis=1)
scaler = MinMaxScaler(feature_range=(0.000001, 1))
norm_grp_data = scaler.fit_transform(cur_grp_data)
cmp_to_scaler[grp_name] = scaler
norm_grp_df = pd.DataFrame(norm_grp_data, columns=['revenue'])
aux_data_df = grp_data.loc[:,('quarter_start', 'company_ticker')]
aux_data_df.reset_index(drop=True, inplace=True)
cur_grp_norm_df = pd.concat((aux_data_df, norm_grp_df), axis=1)
norm_dj_df = norm_dj_df.append(cur_grp_norm_df)
# In[8]:
# Create 16 lags as features for each quarterly data point (normalized revenue in previous step).
dj_reg = pd.DataFrame()
norm_grp_data = norm_dj_df.groupby(by='company_ticker')
for grp_name, grp_data in norm_grp_data:
cur_grp_data = grp_data.sort_values(by=['quarter_start'])
dj_reg_grp = create_lag_lead_features(cur_grp_data, ts_col='revenue',
aux_cols=['company_ticker', 'quarter_start'], num_lags=num_lag_feats)
dj_reg = dj_reg.append(dj_reg_grp)
# In[9]:
# Create list of feature column-names.
feat_cols = []
feat_tgt_cols = []
for i in range(num_lag_feats, 0, -1) :
feat_cols.append('revenueLag' + str(i))
feat_tgt_cols.extend(feat_cols)
# Create list of target column-names.
target_cols = ['revenueLead0']
for i in range(1, num_leads+1) :
target_cols.append('revenueLead' + str(i))
feat_tgt_cols.extend(target_cols)
# In[10]:
# Divide the data into taining and test dataset for each company.
dj_reg_grp_data = dj_reg.groupby(by='company_ticker')
train_data = pd.DataFrame(columns=dj_reg.columns)
test_data = pd.DataFrame(columns=dj_reg.columns)
for grp_name, grp_data in dj_reg_grp_data:
cur_grp_data = grp_data.sort_values(by=['quarter_start'])
num_records = cur_grp_data.shape[0]
train_data = train_data.append(pd.DataFrame(cur_grp_data.iloc[:(num_records - num_test_records),:]))
test_data = test_data.append(pd.DataFrame(cur_grp_data.iloc[(num_records - num_test_records):,:]))
# In[11]:
# Extract features and target values for training data.
train_X = train_data[feat_cols]
train_Y = train_data[target_cols]
"""
Formatting the input to the shape (number_of_samples, timesteps, number_of_features).
For a detailed explanation refer to https://keras.io/layers/recurrent/.
Note: I use a single timestep (set to 1) with 16 features here. The data could instead be framed
as 16 timesteps of one feature, which I plan to experiment with in the future.
"""
train_X = train_X.values.reshape((train_X.shape[0], 1, train_X.shape[1]))
train_Y = train_Y.values.reshape((train_Y.shape[0], train_Y.shape[1]))
print(train_X.shape)
print(train_Y.shape)
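# Illustrative note: with N training rows, the reshape above yields
#   train_X.shape == (N, 1, 16)   # (samples, timesteps, features)
#   train_Y.shape == (N, 1)       # single target column, revenueLead0
# which matches the input contract of the Keras LSTM layer defined in the next cell.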
# In[12]:
# Create a LSTM network.
model = Sequential()
model.add(LSTM(num_lstm_au, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1)) #dimension of the output vector
model.compile(loss='mean_squared_error', optimizer='adam')
# In[13]:
# Fit network. Currently set the batch_size=1; will add more relevant information on this later.
history = model.fit(train_X, train_Y, epochs=num_epochs, batch_size=1, verbose=2, shuffle=False)
# In[14]:
# Print model.summary.
print(model.summary())
# In[21]:
pickle_keras_models()
# In[36]:
# Initialize dataframe with column-names to hold forecasts and other relevant information.
final_test_forecasts = pd.DataFrame(columns=['company_ticker', 'quarter_start', 'actual', 'forecast'])
# Initialize dataframe with column-names to hold MAPE (Mean Absolute Percentage Error) for each company.
final_mapes = pd.DataFrame(columns=['company_ticker', 'mape'])
"""
Compute prediction of test data one company at a time.
This is to simplify the process of scaling it back to original scale for that company.
"""
test_grp_data = test_data.groupby(by='company_ticker')
for grp_name, grp_data in test_grp_data:
cur_grp_data = grp_data.reset_index(drop=True)
cur_grp_data['quarter_start'] = pd.to_datetime(cur_grp_data['quarter_start'])
cur_grp_data = cur_grp_data.sort_values(by=['quarter_start'])
cur_final_test_fcasts = cur_grp_data[['company_ticker', 'quarter_start']]
scaler = cmp_to_scaler[grp_name]
test_X = cur_grp_data[feat_cols]
test_Y = cur_grp_data[target_cols]
test_X_reshape = test_X.values.reshape((test_X.shape[0], 1, test_X.shape[1]))
dnnscoreobject = DnnScoreContext(input_scoring_data=test_X_reshape,
pipeline_execution_type='predict') # construct a context object to be used for scoring purpose.
pipeline_lstm = AzureMLForecastPipeline([('lstm_model', model)])
#yhat = service.score(score_context=dnnscoreobject) # invoke the web service to get predictions on the test data.
yhat = json.loads(score_run(dnn_score_context=dnnscoreobject, pipeline=pipeline_lstm))
print(yhat)
inv_x_yhat = pd.concat((test_X, pd.DataFrame(yhat)), axis=1)
inv_x_yhat = scaler.inverse_transform(inv_x_yhat)
inv_x_yhat_df = pd.DataFrame(inv_x_yhat, columns=feat_tgt_cols)
inv_yhat = inv_x_yhat_df[target_cols]
cur_final_test_fcasts['forecast'] = inv_yhat
inv_x_y = pd.concat((test_X, pd.DataFrame(test_Y)), axis=1)
inv_x_y = scaler.inverse_transform(inv_x_y)
inv_x_y_df = | pd.DataFrame(inv_x_y, columns=feat_tgt_cols) | pandas.DataFrame |
import json
from optparse import OptionParser
import sys
import numpy as np
import pandas as pd
from scipy import stats
import tensorflow as tf
import utils
import models
pd.options.display.max_columns = 100
def train_on_data(train_vals, num_feats, passenger, outfile, init_bound, set_vars={}):
"""
Trains occupancy + context features model on data. Writes predictions to outfile
"""
tf.reset_default_graph()
# make placeholders for model
ka_tensor = tf.placeholder(tf.float32, shape=[None, None, None], name='ka_vals')
feature_tensor = tf.placeholder(tf.float32, shape=[None, None, None, num_feats], name='orf_ka')
    nosite_feature_tensor = tf.placeholder(tf.float32, shape=[None, None, None, num_feats], name='nosite_feats')
mask_tensor = tf.placeholder(tf.float32, shape=[None, None, None], name='mask')
labels_tensor = tf.placeholder(tf.float32, shape=[None, None], name='labels')
# make data dictionary
train_data = {
'ka_vals': ka_tensor,
'mask': mask_tensor,
'features': feature_tensor,
'nosite_features': nosite_feature_tensor,
'labels': labels_tensor,
'passenger': passenger,
'num_guides': len(train_vals['guides'])
}
# make feed dictionary
train_feed_dict = {
ka_tensor: train_vals['ka_vals_3D'],
mask_tensor: train_vals['mask_3D'],
feature_tensor: train_vals['features_4D'],
nosite_feature_tensor: train_vals['nosite_features_4D'],
labels_tensor: train_vals['labels']
}
# make and train model
mod = models.OccupancyWithFeaturesModel(len(train_vals['guides']), num_feats, init_bound=init_bound, fit_background=False, passenger=passenger, set_vars=set_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
mod.fit(sess, train_data, train_feed_dict, maxiter=200)
print(f'Train r2: {mod.r2}')
print(f'Train loss: {mod.final_loss}')
print(f'Fit params: {mod.vars_evals}')
transcript_list = np.repeat(train_vals['transcripts'], len(train_vals['guides']))
pred_df = pd.DataFrame({
'transcript': transcript_list,
'mir': list(train_vals['guides']) * len(train_vals['transcripts']),
'pred': mod.eval_pred.flatten(),
'label': mod.eval_label.flatten(),
'pred_normed': mod.eval_pred_normed.flatten(),
'label_normed': mod.eval_label_normed.flatten(),
})
# if outfile is given, write results to outfile
if outfile is not None:
pred_df.to_csv(outfile, sep='\t', index=False)
mod.vars_evals['r2'] = mod.r2
mod.vars_evals['final_loss'] = mod.final_loss
return mod.vars_evals
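# Illustrative sketch (not from the original source) of the `train_vals` dictionary that
# train_on_data() expects; the keys and rough shapes are inferred from the feed dict above:
#
#   train_vals = {
#       'guides': guide_names,                    # list of miRNA names
#       'transcripts': transcript_ids,            # list of transcript IDs
#       'ka_vals_3D': ka_vals_3D,                 # (n_transcripts, n_mirs, n_sites)
#       'mask_3D': mask_3D,                       # same shape as ka_vals_3D
#       'features_4D': features_4D,               # (..., num_feats) per-site features
#       'nosite_features_4D': nosite_features_4D, # background-site features
#       'labels': labels,                         # (n_transcripts, n_guides)
#   }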
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--tpm_file", dest="TPM_FILE", help="tpm data")
parser.add_option("--feature_file", dest="FEATURE_FILE", help="file with features")
parser.add_option("--mir_to_shuffle", dest="MIR_TO_SHUFFLE", help="miRNA to shuffle", default=None)
parser.add_option("--shuffle_mir", dest="SHUFFLE_MIR", help="miRNA to shuffle in", default=None)
parser.add_option("--shuffle_file", dest="SHUFFLE_FILE", help="file with shuffled features", default=None)
parser.add_option("--kd_cutoff", dest="KD_CUTOFF", help="cutoff value for KDs", default=None, type=float)
parser.add_option("--setparams", dest="SETPARAMS", help="json file of parameters to set", default=None)
parser.add_option("--mirseqs", dest="MIR_SEQS", help="tsv with miRNAs and their sequences")
parser.add_option("--test_mir", dest="TEST_MIR", help="test miRNA", default=None)
parser.add_option("--mode", dest="MODE", help="training_mode")
parser.add_option("--init_bound", dest="INIT_BOUND", help="offset by background binding", default=False, action='store_true')
parser.add_option("--extra_feats", dest="EXTRA_FEATS", help="comma-separated list of extra features", default=None)
parser.add_option("--passenger", dest="PASSENGER", help="include passenger", default=False, action='store_true')
parser.add_option("--outfile", dest="OUTFILE", help="output file", default=None)
parser.add_option("--outparams", dest="OUTPARAMS", help="output file for writing fitted parameters")
(options, args) = parser.parse_args()
if options.SHUFFLE_MIR is not None:
if options.MIR_TO_SHUFFLE == options.SHUFFLE_MIR:
print(options.MIR_TO_SHUFFLE, options.SHUFFLE_MIR)
sys.exit()
if options.MODE not in ['all', 'canon', 'noncanon']:
raise ValueError('Invalid mode.')
if options.EXTRA_FEATS == 'none':
MODEL = 'biochem'
elif options.EXTRA_FEATS == 'logSA_diff,Threep_canon,PCT':
MODEL = 'biochemplus'
else:
MODEL = options.EXTRA_FEATS.replace(',','_')
# read miRNA DATA and get names of all guide miRNAs
MIRNA_DATA = pd.read_csv(options.MIR_SEQS, sep='\t', index_col='mir')
ALL_GUIDES = sorted(list(MIRNA_DATA.index))
# print(ALL_GUIDES)
# split into training and testing
if options.TEST_MIR is None:
TRAIN_GUIDES = ALL_GUIDES
else:
TRAIN_GUIDES = [x for x in ALL_GUIDES if x != options.TEST_MIR]
print(f'Number training guides: {len(TRAIN_GUIDES)}')
# if using passenger strand, add them
if options.PASSENGER:
TRAIN_MIRS = list(np.array([[x, x + '_pass'] for x in TRAIN_GUIDES]).flatten())
else:
TRAIN_MIRS = TRAIN_GUIDES
# read in TPM data
ALL_TPMS = | pd.read_csv(options.TPM_FILE, sep='\t', index_col=0) | pandas.read_csv |
import glob
import numpy as np
import pandas as pd
import re
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import transforms
from lib.cfg import *
def get_calcification_data_index():
# grep .png files in absolute path
list_image_path = glob.glob(PATH_IMAGE+'*.png')
list_cal_mask_path = glob.glob(PATH_CALCIFICATION_MASK+'*.png')
list_lesion_mask_path = glob.glob(PATH_LESION_MASK+'*.png')
# build absolute path into DataFrame
df_image = pd.DataFrame({'image_path': list_image_path})
df_image['id'] = list(map(lambda x: int(re.findall(r'\d+', x)[-1]), df_image['image_path']))
df_cal_mask = | pd.DataFrame({'cal_mask_path': list_cal_mask_path}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.time import Time
def load_omni():
columns = ['date', 'time', 'hgi_lat', 'hgi_lon', 'br', 'bt', 'bn', 'b', 'v', 'v_lat', 'v_lon', 'density', 'temperature']
omni = pd.read_csv('OMNI_COHO1HR_MERGED_MAG_PLASMA_199207.txt', delim_whitespace=True, skiprows=240, skipfooter=3, names=columns)
# Combine date and time into timeindex
omni['time'] = pd.to_datetime(omni['date']+'T'+omni['time'], format='%d-%m-%YT%H:%M:%S.%f')
omni.drop(columns='date', inplace=True)
# Set invalid numbers to NaN
id_bad = omni == -1e31
omni[id_bad] = np.NaN
return omni
def load_icme():
"""
Function to load in the Richardson and Cane ICME list.
"""
columns = ['shock', 'leading_edge', 'trailing_edge', 'type']
icme = pd.read_csv('ICMEs.dat', delim_whitespace=True, names=columns)
# Convert MJD to datetimes.
icme['shock'] = pd.to_datetime(Time(icme['shock'], format='mjd').datetime)
icme['leading_edge'] = pd.to_datetime(Time(icme['leading_edge'], format='mjd').datetime)
icme['trailing_edge'] = pd.to_datetime(Time(icme['trailing_edge'], format='mjd').datetime)
return icme
def cumdf(data, bin_edges):
hist, bins = np.histogram(data, bins=bin_edges)
cdf = np.cumsum(hist) / np.sum(hist)
return cdf
def main():
omni = load_omni()
icme = load_icme()
# Remove rows from omni with any bad speed, density or imf magnitude data (all needed for G calculaiton)
#omni.dropna(how='any', subset=['density', 'v', 'b'], inplace=True)
    # Compute the geoeffectiveness parameter.
    alpha = 0.5
    # IMF clock angle
    theta = np.arctan2(-omni['bt'], omni['bn'])
# Equation 1 in Owens et al.
d_exp = 2.0/3.0 - alpha
b_exp = 2.0*alpha
v_exp = 7.0/3.0 - 2.0*alpha
omni['g'] = (omni['density']**d_exp) * (omni['b']**b_exp) * (omni['v']**v_exp) * (np.sin(theta/2.0)**4.0) * 1e-6
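    # Written out (illustrative), Equation 1 with alpha = 0.5 reduces to
    #   g = 1e-6 * n**(1/6) * B**1 * V**(4/3) * sin(theta/2)**4
    # since d_exp = 2/3 - alpha = 1/6, b_exp = 2*alpha = 1, v_exp = 7/3 - 2*alpha = 4/3.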
quantile_thresh = 0.99 # the percentile to consider
g_thresh = np.nanquantile(omni['g'], quantile_thresh)
print("99th percentile of g: {:3.4f}".format(g_thresh))
plt.plot(omni['time'], omni['g'], 'k-', zorder=0)
plt.hlines(g_thresh, omni['time'].min(), omni['time'].max(), colors='r', linestyles='--', zorder=1)
plt.xlim(omni['time'].min(), omni['time'].max())
plt.ylim(0, omni['g'].max())
plt.xlabel('Time')
plt.ylabel('Geoeffectiveness')
plt.savefig('geoeffectiveness.png')
# Compute the quantiles of the CME speed distribution
n_categories = 10 # number of categories for V, B. 4 = quartiles
n_bins = 10000 # number of bins for CDF [10000]
n_cl_bins = 100 # number of cost/loss bins
# Define G bins for computing CDFs
g_min = omni['g'].min()
g_max = omni['g'].max()
dg = (g_max-g_min)/n_bins
g_bin_edges = np.arange(g_min, g_max+dg, dg)
g_bin_centres = (g_bin_edges[0:-1] + g_bin_edges[1:]) / 2.0
# Loop through the ICMEs and compute the average CME properties, and also mask the solar wind time series.
# Add in keys to icme and omni for the average solar wind properties and cme properties.
for key in ['v', 'b']:
icme[key] = np.NaN*np.zeros(icme.shape[0])
for key in ['cme_v', 'cme_b', 'region', 'type']:
if key not in ['region', 'type']:
omni[key] = np.NaN*np.zeros(omni.shape[0])
else:
omni[key] = np.zeros(omni.shape[0])
for i, row in icme.iterrows():
# Find solar wind period between cme shock and trailing edge
id_cme = (omni['time'] >= row['shock']) & (omni['time'] <= row['trailing_edge'])
if np.any(id_cme):
# Update ICME with solar wind parameters
icme.loc[i, 'v'] = omni.loc[id_cme, 'v'].mean(skipna=True)
icme.loc[i, 'b'] = omni.loc[id_cme, 'b'].mean(skipna=True)
# Update solar wind paramters with average CME properties and type
omni.loc[id_cme, 'cme_v'] = icme.loc[i, 'v']
omni.loc[id_cme, 'cme_b'] = icme.loc[i, 'b']
omni.loc[id_cme, 'region'] = 1 # Flag for being in sheath, will update CME regions after.
omni.loc[id_cme, 'type'] = icme.loc[i, 'type']
# Update region flag if in CME rather than sheath
id_cme = (omni['time'] >= row['leading_edge']) & (omni['time'] <= row['trailing_edge'])
if np.any(id_cme):
omni.loc[id_cme, 'region'] = 2
quantiles = np.arange(1, n_categories, 1) / n_categories
v_quantiles = icme['v'].quantile(quantiles)
b_quantiles = icme['b'].quantile(quantiles)
# Find the indices of SW parameters for the different v and b quantiles and sw classifications (cme, no cme)
groups = {}
groups['all'] = np.argwhere(np.isfinite(omni['g'])).squeeze()
groups['no_cme'] = np.argwhere(omni['region'] == 0).squeeze()
groups['cme'] = np.argwhere(omni['region'] > 0).squeeze()
for i in range(v_quantiles.size + 1):
v_key = "v_{:02d}".format(i)
b_key = "b_{:02d}".format(i)
if i == 0:
id_group = omni['cme_v'] <= v_quantiles.values[i] # do nans need to be exlucded here?
groups[v_key] = np.argwhere(id_group).squeeze()
id_group = omni['cme_b'] <= b_quantiles.values[i]
groups[b_key] = np.argwhere(id_group).squeeze()
elif (i > 0) & (i < v_quantiles.size):
id_group = (omni['cme_v'] > v_quantiles.values[i-1]) & (omni['cme_v'] <= v_quantiles.values[i])
groups[v_key] = np.argwhere(id_group).squeeze()
id_group = (omni['cme_b'] > b_quantiles.values[i-1]) & (omni['cme_b'] <= b_quantiles.values[i])
groups[b_key] = np.argwhere(id_group).squeeze()
elif i == v_quantiles.size:
id_group = omni['cme_v'] > v_quantiles.values[i-1]
groups[v_key] = np.argwhere(id_group).squeeze()
id_group = omni['cme_b'] > b_quantiles.values[i-1]
groups[b_key] = np.argwhere(id_group).squeeze()
# Now the combined V and B groups
for i in range(v_quantiles.size + 1):
v_key = "v_{:02d}".format(i)
for j in range(b_quantiles.size + 1):
b_key = "b_{:02d}".format(j)
vb_key = v_key + '_' + b_key
# Also get the intersection of the matched quantiles for the combined v-b category
groups[vb_key] = np.intersect1d(groups[v_key], groups[b_key])
# Compute the exceedance probability and numbers above and below threshold for each grouping of the data.
prob = {}
number = {}
# Find g_bin closest to threshold from below.
pos = np.argwhere(g_bin_centres <= g_thresh).squeeze() # should this be <=?
id_exceed = pos[-1]
for key, index in groups.items():
g_sub = omni.loc[index, 'g']
cdf = cumdf(g_sub, g_bin_edges)
prob[key] = 1.0 - cdf[id_exceed]
n_above = np.sum(g_sub > g_thresh)
n_below = np.sum(g_sub <= g_thresh)
n_all = np.sum(np.isfinite(g_sub))
number[key] = {'above': n_above, 'below': n_below, 'all': n_all}
# Some statistics in the paper:
print("Ncme = {}, Ncme' = {}".format(number['cme']['all'], number['cme']['above']))
print("Nsw = {}, Nsw' = {}".format(number['no_cme']['all'], number['no_cme']['above']))
print("p'sw = {:3.4f}".format(number['no_cme']['above']/number['no_cme']['all']))
print("p'cme = {:3.4f}".format(number['cme']['above']/number['cme']['all']))
# Paper has Ncme=17744, Ncme'=1149, Nsw=216787, Nsw'=1112
dcost = 1.0 / n_cl_bins
costs = np.arange(dcost, 1.0, dcost)
# DataFrame to store all cost calculations
costloss = | pd.DataFrame({'cost':costs, 'perfect': 0, 'climatology': 0, 'cmes': 0, 'v': 0, 'b': 0, 'vb': 0}) | pandas.DataFrame |
import json
import pandas as pd
import re
import sys
fdir = '../data/geo/1_separate/chelsa'
base_url = 'https://www.wsl.ch/lud/chelsa/data'
if __name__ == "__main__":
# First part to modify js file so that it dumps the js object as JSON
if sys.argv[1] == 'part1':
path = f'{fdir}/index.js'
f = open(path, 'r')
d = f.read()
f.close()
l1 = '''var fs = require('file-system');
'''
l2 = '''
fs.writeFile("index.json", JSON.stringify(dirs), (err) => {
if (err) console.log(err);
console.log("JSON converted from js object and successfully Written to index.json.");
});
'''
new = l1 + d + l2
path = f'{fdir}/index.js'
f = open(path, 'w+')
f.write(new)
        f.close()
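        # To produce index.json, the patched file would then be run with node (this
        # assumes the 'file-system' npm package is installed in that directory), e.g.:
        #   cd ../data/geo/1_separate/chelsa && node index.js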
# Second part to get all URLs from json object
if sys.argv[1] == 'part2':
path = f'{fdir}/index.json'
print(f'{path}')
f = open(path, 'r')
d = f.read()
js = json.loads(d)
f.close()
# Checking out structure of json
# print(js.keys())
# print(js['bioclim'].keys())
# print(js['bioclim']['integer']['f']) # all bioclim10
# print(js['climatologies'].keys())
# print(js['climatologies']['prec']['f'])
# print(js['climatologies']['temp']['integer'].keys())
# print(js['climatologies']['temp']['integer']['tmax'])
# print(js['climatologies']['temp']['integer']['tmin'])
# print(js['climatologies']['temp']['integer']['temp'])
df1 = pd.DataFrame(js['bioclim']['integer']['f'])
df1['dir'] = 'bioclim/integer'
df2 = pd.DataFrame(js['climatologies']['prec']['f'])
df2['dir'] = 'climatologies/prec'
df3 = pd.DataFrame(js['climatologies']['temp']['integer']['tmax']['f'])
df3['dir'] = 'climatologies/temp/integer/tmax'
df4 = pd.DataFrame(js['climatologies']['temp']['integer']['tmin']['f'])
df4['dir'] = 'climatologies/temp/integer/tmin'
df5 = pd.DataFrame(js['climatologies']['temp']['integer']['temp']['f'])
df5['dir'] = 'climatologies/temp/integer/temp'
df = | pd.concat([df1, df2, df3, df4, df5]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
import nose
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Index
from pandas.lib import Timestamp
from pandas.compat import StringIO
class UsecolsTests(object):
def test_raise_on_mixed_dtype_usecols(self):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
msg = ("The elements of 'usecols' must "
"either be all strings, all unicode, or all integers")
usecols = [0, 'b', 2]
with tm.assertRaisesRegexp(ValueError, msg):
self.read_csv(StringIO(data), usecols=usecols)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# see gh-5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_usecols_index_col_False(self):
# see gh-9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
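        # (the trailing commas in s_malformed add an extra, unnamed field per row,
        # which usecols with index_col=False is expected to ignore)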
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_usecols_index_col_conflict(self):
# see gh-4201: test that index_col as integer reflects usecols
data = 'a,b,c,d\nA,a,1,one\nB,b,2,two'
expected = DataFrame({'c': [1, 2]}, index=Index(
['a', 'b'], name='b'))
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=['b', 'c'],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col='b')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[1, 2],
index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'b': ['a', 'b'], 'c': [1, 2], 'd': ('one', 'two')})
expected = expected.set_index(['b', 'c'])
df = self.read_csv(StringIO(data), usecols=['b', 'c', 'd'],
index_col=['b', 'c'])
tm.assert_frame_equal(expected, df)
def test_usecols_implicit_index_col(self):
# see gh-2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
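        # (each data row has four fields but the header names only three columns,
        # so the leading field becomes an implicit index)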
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# see gh-2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
"""
Importing necessary libraries.
"""
import tweepy
import json
import re
import string
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.python.keras.preprocessing.text import Tokenizer
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.models import model_from_json
import random
from flask import Flask,render_template,url_for,request
import numpy as np
import emoji
app = Flask(__name__)
"""
Function to render page http://127.0.0.1:5000/
"""
@app.route('/')
def hello(st=''):
print("HOME")
return render_template('home.html',title='home')
"""
Function to render page http://127.0.0.1:5000/analysis
"""
@app.route('/analysis',methods=['POST','GET','OPTIONS'])
def analysis():
"""
Taking search query into the variable 'key'.
"""
key=request.form['InputText']
"""
Performing authentication to access twitter's data.
(Fill in your Twitter developer credentials below and uncomment the commented-out code block that follows.)
"""
"""
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
"""
"""
Creating an api object using tweepy.
"""
api = tweepy.API (auth)
"""
Fetching tweets and storing them in results array. 'num' variable denotes the number of tweets to be fetched.
"""
results = []
num = 50
for tweet in tweepy.Cursor (api.search, q = key, lang = "en").items(num):
results.append(tweet)
"""
Creating a pandas dataframe to capture tweet information.
"""
dataset=pd.DataFrame()
dataset["tweet_id"]=pd.Series([tweet.id for tweet in results])
dataset["username"]=pd.Series([tweet.author.screen_name for tweet in results])
dataset["text"]= | pd.Series([tweet.text for tweet in results]) | pandas.Series |
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize("vals", [
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
res2 = arr.get_values()
tm.assert_numpy_array_equal(res2, vals)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.to_dense()[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.to_dense()[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.to_dense()[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.to_dense()[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
| tm.assert_sp_array_equal(res, exp) | pandas.util.testing.assert_sp_array_equal |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 20 15:50:32 2022
@author: kkrao
"""
import os
import pandas as pd
import init
csvs = os.listdir(os.path.join(init.dir_root, "data","gee","all_states"))
df = pd.read_csv(os.path.join(init.dir_root, "data","gee",\
"lightnings_22_feb_2022_2016_2021_California.csv"))
for state in init.states:
n_state_files = 0
df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import csv
import copy
import time
import json
import ipaddress
import pickle
import operator
from Policy import Policy
from time import sleep
class Utils(object):
@staticmethod
def search_interval_array(interval_dict, value):
interval_array = list(interval_dict.keys())
low, high = 0, len(interval_array) - 1
while (low < high):
mid = int((high + low) / 2)
# print("low = " + str(low) + " high = " + str(high) + " mid = " + str(mid))
if value in interval_array[mid]:
return interval_dict[interval_array[mid]]
if value < interval_array[mid].right:
high = mid
if value > interval_array[mid].left:
low = mid
print("Error!")
return None
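    # Hedged note (added comment, not in the original code): this lookup assumes
    # interval_dict's keys are sorted, non-overlapping pandas Interval objects that
    # cover the queried value, e.g. the IntervalArray built by TrafficMatrix below;
    # a value falling outside every interval may keep the loop from terminating.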
@staticmethod
def run_pareto_visualization():
alphas = [1, 1.5, 1.7, 2]
x_ms = [1, 1.5, 1.7, 2]
n_exp = 1000
for a in alphas:
for x_m in x_ms:
data = np.sort((np.random.pareto(a, n_exp) + 1) * x_m)
PlotTG.plot_xy(range(n_exp), data, "Sample Index", "Sample Value",
"Pareto Distribution for alpha = " + str(a) + " x_m = " + str(x_m))
PlotTG.plot_cdf(data, "Sample Value",
"CDF Of Pareto Distribution for alpha = " + str(a) + " x_m = " + str(x_m))
class PlotTG(object):
@staticmethod
def plot_xy(x_data, y_data, x_label, y_label, title):
plt.clf()
fig, ax = plt.subplots()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
ax.plot(x_data, y_data)
plt.tight_layout()
fig.show()
@staticmethod
def plot_cdf(data, data_x="data", data_title="title"):
x = np.linspace(min(data), max(data), 101)
count_dict = {i: 0 for i in x}
for i in x:
for j in data:
if j < i:
count_dict[i] += 1
max_n = max(count_dict.values())
plt.clf()
fig, ax = plt.subplots()
plt.xlabel(data_x)
plt.ylabel("CDF")
plt.title(data_title)
ax.plot(count_dict.keys(), [i / max_n for i in count_dict.values()])
plt.tight_layout()
fig.show()
@staticmethod
def plot_grayscacle_heatmap(prob_df):
prob_mtx = prob_df.to_numpy()
# 0 - black 255 - white
max_n = max([max(row) for row in prob_mtx])
mean = lambda x: sum(x) / len(x)
mean_n = mean([mean(row) for row in prob_mtx])
# normalize
gs_matrix1 = [[np.uint8((1 - (i / mean_n) * 1e15) * 255) for i in row] for row in prob_mtx]
gs_matrix2 = [[np.uint8((1 - (i / max_n) * 1e15) * 255) for i in row] for row in prob_mtx]
fig, ax = plt.subplots()
ax = plt.imshow(gs_matrix1, cmap='gray')
plt.show()
fig, ax = plt.subplots()
ax = plt.imshow(gs_matrix2, cmap='gray')
plt.show()
@staticmethod
def plot_array(data, title, x_label, y_label, sort=True):
plt.clf()
fig, ax = plt.subplots()
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
if sort:
ax.plot(range(1, len(data) + 1), np.sort(data))
else:
ax.plot(range(1, len(data) + 1), data)
fig.show()
@staticmethod
def generate_probabilities_matrix(flow_array):
# Rows correspond to source racks and columns to destination racks - ProjecToR
# Pr(SRC = s, DST = d) == Pr(SRC = s)*Pr(DST = d) i.i.d
ips, pair_flow_size = set(), dict()
for tp in flow_array: # tp = src_ip, dst_ip, flow_size, flow_id
src_ip, dst_ip, flow_size = str(tp[0]), str(tp[1]), int(tp[2])
ips.add(src_ip)
ips.add(dst_ip)
# save new or aggregate
pair_flow_size[(src_ip, dst_ip)] = flow_size if pair_flow_size.get(
(src_ip, dst_ip)) is None else pair_flow_size[(src_ip, dst_ip)] + flow_size
total_flow_size = sum([tp[2] for tp in flow_array])
df_dict = {}
for ip_dst in ips:
curr_column = []
for ip_src in ips:
flow_size = pair_flow_size.get((ip_src, ip_dst))
connection_probability = float(flow_size / total_flow_size) if flow_size is not None else 0.0
curr_column.append(connection_probability)
df_dict[ip_dst] = curr_column
df = pd.DataFrame.from_dict(df_dict)
return df
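# Hedged usage sketch (added, not from the original module): generate_probabilities_matrix
# aggregates per-(src, dst) flow sizes and normalizes by the total flow size, so for
#   flows = [("10.0.0.1", "10.0.0.2", 100, 0), ("10.0.0.1", "10.0.0.2", 300, 1),
#            ("10.0.0.2", "10.0.0.1", 100, 2)]
#   prob_df = PlotTG.generate_probabilities_matrix(flows)
# the column for destination "10.0.0.2" holds 0.8 in the row of source "10.0.0.1",
# and the reverse pair holds 0.2.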
class TrafficMatrix(object):
def __init__(self, n, alpha, x_m):
self.src_dist = np.sort(np.random.pareto(alpha, n + 1) * x_m) # n intervals
self.dst_dist = np.sort(np.random.pareto(alpha, n + 1) * x_m) # n intervals
src_interval_array = | pd.arrays.IntervalArray.from_arrays(self.src_dist[:-1], self.src_dist[1:]) | pandas.arrays.IntervalArray.from_arrays |
import html5lib
import requests
import lxml
from bs4 import BeautifulSoup
from bs4 import Comment
import pandas as pd
import numpy as np
pd.set_option('mode.chained_assignment', None)
# Getting the team acronyms
teams = | pd.read_csv('mlb_teams_abbreviations.csv') | pandas.read_csv |
"""
Long/Short Cross-Sectional Momentum
Author: <NAME>
This algorithm creates traditional value factors and standardizes
them using a synthetic S&P500. It then uses a 130/30 strategy to trade.
https://www.math.nyu.edu/faculty/avellane/Lo13030.pdf
Please direct any questions, feedback, or corrections to <EMAIL>
The material on this website is provided for informational purposes only
and does not constitute an offer to sell, a solicitation to buy, or a
recommendation or endorsement for any security or strategy,
nor does it constitute an offer to provide investment advisory or other services by Quantopian.
In addition, the content of the website neither constitutes investment advice
nor offers any opinion with respect to the suitability of any security or any specific investment.
Quantopian makes no guarantees as to accuracy or completeness of the
views expressed in the website. The views are subject to change,
and may have become unreliable for various reasons,
including changes in market conditions or economic circumstances.
"""
import numpy as np
import pandas as pd
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.factors import CustomFactor
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import SimpleMovingAverage, AverageDollarVolume
from quantopian.pipeline.filters.morningstar import IsPrimaryShare
from quantopian.pipeline.data import morningstar as mstar
# Custom Factor 1 : Dividend Yield
class Div_Yield(CustomFactor):
inputs = [morningstar.valuation_ratios.dividend_yield]
window_length = 1
def compute(self, today, assets, out, d_y):
out[:] = d_y[-1]
# Custom Factor 2 : P/B Ratio
class Price_to_Book(CustomFactor):
inputs = [morningstar.valuation_ratios.pb_ratio]
window_length = 1
def compute(self, today, assets, out, p_b_r):
out[:] = -p_b_r[-1]
# Custom Factor 3 : Price to Trailing 12 Month Sales
class Price_to_TTM_Sales(CustomFactor):
inputs = [morningstar.valuation_ratios.ps_ratio]
window_length = 1
def compute(self, today, assets, out, ps):
out[:] = -ps[-1]
# Custom Factor 4 : Price to Trailing 12 Month Cashflow
class Price_to_TTM_Cashflows(CustomFactor):
inputs = [morningstar.valuation_ratios.pcf_ratio]
window_length = 1
def compute(self, today, assets, out, pcf):
out[:] = -pcf[-1]
# This factor creates the synthetic S&P500
class SPY_proxy(CustomFactor):
inputs = [morningstar.valuation.market_cap]
window_length = 1
def compute(self, today, assets, out, mc):
out[:] = mc[-1]
# This pulls all necessary data in one step
def Data_Pull():
# create the pipeline for the data pull
Data_Pipe = Pipeline()
# create SPY proxy
Data_Pipe.add(SPY_proxy(), 'SPY Proxy')
# Div Yield
Data_Pipe.add(Div_Yield(), 'Dividend Yield')
# Price to Book
Data_Pipe.add(Price_to_Book(), 'Price to Book')
# Price / TTM Sales
Data_Pipe.add(Price_to_TTM_Sales(), 'Price / TTM Sales')
# Price / TTM Cashflows
Data_Pipe.add(Price_to_TTM_Cashflows(), 'Price / TTM Cashflow')
return Data_Pipe
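# Hedged usage sketch (not part of the original algorithm; the function names follow the
# standard Quantopian pattern): the pipeline above would typically be attached in
# initialize() and consumed before each trading day, e.g.
# def initialize(context):
#     attach_pipeline(Data_Pull(), 'data_pull')
# def before_trading_start(context, data):
#     context.output = pipeline_output('data_pull')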
# function to filter out unwanted values in the scores
def filter_fn(x):
if x <= -10:
x = -10.0
elif x >= 10:
x = 10.0
return x
def standard_frame_compute(df):
"""
Standardizes the Pipeline API data pull
using the S&P500's means and standard deviations for
particular CustomFactors.
parameters
----------
    df: pandas.DataFrame
full result of Data_Pull
returns
-------
numpy.array
standardized Data_Pull results
numpy.array
index of equities
"""
# basic clean of dataset to remove infinite values
df = df.replace([np.inf, -np.inf], np.nan)
df = df.dropna()
# need standardization params from synthetic S&P500
df_SPY = df.sort(columns='SPY Proxy', ascending=False)
# create separate dataframe for SPY
# to store standardization values
df_SPY = df_SPY.head(500)
# get dataframes into numpy array
df_SPY = df_SPY.as_matrix()
# store index values
index = df.index.values
    # turn into a numpy array for speed
df = df.as_matrix()
# create an empty vector on which to add standardized values
df_standard = np.empty(df.shape[0])
for col_SPY, col_full in zip(df_SPY.T, df.T):
# summary stats for S&P500
mu = np.mean(col_SPY)
sigma = np.std(col_SPY)
col_standard = np.array(((col_full - mu) / sigma))
# create vectorized function (lambda equivalent)
fltr = np.vectorize(filter_fn)
col_standard = (fltr(col_standard))
# make range between -10 and 10
col_standard = (col_standard / df.shape[1])
# attach calculated values as new row in df_standard
df_standard = np.vstack((df_standard, col_standard))
# get rid of first entry (empty scores)
df_standard = np.delete(df_standard,0,0)
return (df_standard, index)
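# Hedged illustration of the standardization above (numbers are made up): if a factor
# column has S&P500 mean mu = 2.0 and sigma = 0.5, a stock value of 3.0 becomes
# (3.0 - 2.0) / 0.5 = 2.0, is clipped to [-10, 10] by filter_fn, and is then divided
# by the number of factor columns before being summed in composite_score below.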
def composite_score(df, index):
"""
Summarize standardized data in a single number.
parameters
----------
df: numpy.array
standardized results
index: numpy.array
index of equities
returns
-------
pandas.Series
series of summarized, ranked results
"""
# sum up transformed data
df_composite = df.sum(axis=0)
# put into a pandas dataframe and connect numbers
# to equities via reindexing
df_composite = | pd.Series(data=df_composite,index=index) | pandas.Series |
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import pytest
from histogrammar.dfinterface.pandas_histogrammar import PandasHistogrammar
from histogrammar.dfinterface.make_histograms import (
get_bin_specs,
get_time_axes,
make_histograms,
)
def test_get_histograms():
pandas_filler = PandasHistogrammar(
features=[
"date",
"isActive",
"age",
"eyeColor",
"gender",
"company",
"latitude",
"longitude",
["isActive", "age"],
["latitude", "longitude"],
],
bin_specs={
"longitude": {"binWidth": 5, "origin": 0},
"latitude": {"binWidth": 5, "origin": 0},
},
)
current_hists = pandas_filler.get_histograms(pytest.test_df)
assert current_hists["age"].toJson() == pytest.age
assert current_hists["company"].toJson() == pytest.company
assert current_hists["date"].toJson() == pytest.date
assert current_hists["eyeColor"].toJson() == pytest.eyesColor
assert current_hists["gender"].toJson() == pytest.gender
assert current_hists["isActive"].toJson() == pytest.isActive
assert current_hists["isActive:age"].toJson() == pytest.isActive_age
assert current_hists["latitude"].toJson() == pytest.latitude
assert current_hists["longitude"].toJson() == pytest.longitude
assert current_hists["latitude:longitude"].toJson() == pytest.latitude_longitude
def test_make_histograms():
features = [
"date",
"isActive",
"age",
"eyeColor",
"gender",
"company",
"latitude",
"longitude",
["isActive", "age"],
["latitude", "longitude"],
"transaction",
]
bin_specs = {
"transaction": {"num": 100, "low": -2000, "high": 2000},
"longitude": {"binWidth": 5, "origin": 0},
"latitude": {"binWidth": 5, "origin": 0},
}
current_hists = make_histograms(
pytest.test_df, features=features, binning="unit", bin_specs=bin_specs
)
assert current_hists["age"].toJson() == pytest.age
assert current_hists["company"].toJson() == pytest.company
assert current_hists["date"].toJson() == pytest.date
assert current_hists["eyeColor"].toJson() == pytest.eyesColor
assert current_hists["gender"].toJson() == pytest.gender
assert current_hists["isActive"].toJson() == pytest.isActive
assert current_hists["isActive:age"].toJson() == pytest.isActive_age
assert current_hists["latitude"].toJson() == pytest.latitude
assert current_hists["longitude"].toJson() == pytest.longitude
assert current_hists["latitude:longitude"].toJson() == pytest.latitude_longitude
assert current_hists["transaction"].toJson() == pytest.transaction
def test_make_histograms_no_time_axis():
hists, features, bin_specs, time_axis, var_dtype = make_histograms(
pytest.test_df, time_axis="", ret_specs=True,
)
assert len(hists) == 21
assert len(features) == 21
assert len(bin_specs) == 6
assert len(var_dtype) == 21
assert time_axis == ""
assert "date" in hists
h = hists["date"]
assert h.binWidth == 751582381944448.0
for cols in features:
cols = cols.split(":")
assert len(cols) == 1
for f, bs in bin_specs.items():
assert isinstance(bs, dict)
assert "age" in bin_specs
dateage = bin_specs["age"]
assert dateage["binWidth"] == 2.0
assert dateage["origin"] == 9.5
def test_make_histograms_with_time_axis():
hists, features, bin_specs, time_axis, var_dtype = make_histograms(
pytest.test_df, time_axis=True, ret_specs=True, time_width=None, time_offset=None
)
assert len(hists) == 20
assert len(features) == 20
assert len(bin_specs) == 20
assert len(var_dtype) == 21
assert time_axis == "date"
assert "date:age" in hists
h = hists["date:age"]
assert h.binWidth == 751582381944448.0
for cols in features:
cols = cols.split(":")
assert len(cols) == 2 and cols[0] == "date"
for f, bs in bin_specs.items():
assert len(bs) == 2
assert "date:age" in bin_specs
dateage = bin_specs["date:age"]
assert dateage[0]["binWidth"] == 751582381944448.0
assert dateage[1]["binWidth"] == 2.0
assert dateage[1]["origin"] == 9.5
# test get_bin_specs 1
bin_specs = get_bin_specs(hists)
assert "date:age" in bin_specs
dateage = bin_specs["date:age"]
assert dateage[0]["binWidth"] == 751582381944448.0
assert dateage[1]["binWidth"] == 2.0
assert dateage[1]["origin"] == 9.5
# test get_bin_specs 2
bin_specs = get_bin_specs(hists, skip_first_axis=True)
assert "age" in bin_specs
age = bin_specs["age"]
assert age["binWidth"] == 2.0
assert age["origin"] == 9.5
# test get_bin_specs 3
bin_specs = get_bin_specs(hists["date:age"])
assert bin_specs[0]["binWidth"] == 751582381944448.0
assert bin_specs[1]["binWidth"] == 2.0
assert bin_specs[1]["origin"] == 9.5
# test get_bin_specs 4
bin_specs = get_bin_specs(hists["date:age"], skip_first_axis=True)
assert bin_specs["binWidth"] == 2.0
assert bin_specs["origin"] == 9.5
def test_make_histograms_unit_binning():
hists, features, bin_specs, time_axis, var_dtype = make_histograms(
pytest.test_df, binning="unit", time_axis="", ret_specs=True
)
assert len(hists) == 21
assert len(features) == 21
assert len(bin_specs) == 0
assert len(var_dtype) == 21
assert time_axis == ""
assert "date" in hists
h = hists["date"]
assert h.binWidth == 2592000000000000
for cols in features:
cols = cols.split(":")
assert len(cols) == 1
for f, bs in bin_specs.items():
assert isinstance(bs, dict)
assert "age" in hists
h = hists["age"]
assert h.binWidth == 1.0
assert h.origin == 0.0
def test_get_histograms_module():
pandas_filler = PandasHistogrammar(
features=[
"date",
"isActive",
"age",
"eyeColor",
"gender",
"company",
"latitude",
"longitude",
["isActive", "age"],
["latitude", "longitude"],
],
bin_specs={
"longitude": {"binWidth": 5, "origin": 0},
"latitude": {"binWidth": 5, "origin": 0},
},
read_key="input",
store_key="output",
)
datastore = pandas_filler.transform(datastore={"input": pytest.test_df})
assert "output" in datastore
current_hists = datastore["output"]
assert current_hists["age"].toJson() == pytest.age
assert current_hists["company"].toJson() == pytest.company
assert current_hists["date"].toJson() == pytest.date
assert current_hists["eyeColor"].toJson() == pytest.eyesColor
assert current_hists["gender"].toJson() == pytest.gender
assert current_hists["isActive"].toJson() == pytest.isActive
assert current_hists["isActive:age"].toJson() == pytest.isActive_age
assert current_hists["latitude"].toJson() == pytest.latitude
assert current_hists["longitude"].toJson() == pytest.longitude
assert current_hists["latitude:longitude"].toJson() == pytest.latitude_longitude
def test_get_time_axes():
time_axes = get_time_axes(pytest.test_df)
np.testing.assert_array_equal(time_axes, ["date"])
def test_null_histograms():
d = {'transaction': {0: np.nan, 1: 1.0, 2: np.nan, 3: 3.0, 4: 4.0},
'isActive': {0: None, 1: None, 2: True, 3: True, 4: False},
'eyeColor': {0: None, 1: None, 2: 'Jones', 3: 'USA', 4: 'FL'},
't2': {0: np.nan, 1: 2.0, 2: np.nan, 3: 4.0, 4: 5.0},
'foo': {0: np.nan, 1: np.nan, 2: np.nan, 3: True, 4: False},
'bar': {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e'},
'bla': {0: 1, 1: 2, 2: 3, 3: 4, 4: np.nan},
'mixed': {0: 'a', 1: 'b', 2: 'c', 3: np.nan, 4: 1}}
df = | pd.DataFrame(d) | pandas.DataFrame |
import pandas as pd
import tqdm
from pynput import keyboard
import bird_view.utils.bz_utils as bzu
import bird_view.utils.carla_utils as cu
from bird_view.models.common import crop_birdview
from perception.utils.helpers import get_segmentation_tensor
from perception.utils.segmentation_labels import DEFAULT_CLASSES
from perception.utils.visualization import get_rgb_segmentation, get_segmentation_colors
def _paint(observations, control, diagnostic, debug, env, show=False, use_cv=False, trained_cv=False):
import cv2
import numpy as np
WHITE = (255, 255, 255)
RED = (255, 0, 0)
CROP_SIZE = 192
X = 176
Y = 192 // 2
R = 2
birdview = cu.visualize_birdview(observations['birdview'])
birdview = crop_birdview(birdview)
if 'big_cam' in observations:
canvas = np.uint8(observations['big_cam']).copy()
rgb = np.uint8(observations['rgb']).copy()
else:
canvas = np.uint8(observations['rgb']).copy()
def _stick_together(a, b, axis=1):
if axis == 1:
h = min(a.shape[0], b.shape[0])
r1 = h / a.shape[0]
r2 = h / b.shape[0]
a = cv2.resize(a, (int(r1 * a.shape[1]), int(r1 * a.shape[0])))
b = cv2.resize(b, (int(r2 * b.shape[1]), int(r2 * b.shape[0])))
return np.concatenate([a, b], 1)
else:
h = min(a.shape[1], b.shape[1])
r1 = h / a.shape[1]
r2 = h / b.shape[1]
a = cv2.resize(a, (int(r1 * a.shape[1]), int(r1 * a.shape[0])))
b = cv2.resize(b, (int(r2 * b.shape[1]), int(r2 * b.shape[0])))
return np.concatenate([a, b], 0)
def _stick_together_and_fill(a, b):
# sticks together a and b.
# a should be wider than b, and b will be filled with black pixels to match a's width.
w_diff = a.shape[1] - b.shape[1]
fill = np.zeros(shape=(b.shape[0], w_diff, 3), dtype=np.uint8)
b_filled = np.concatenate([b, fill], axis=1)
return np.concatenate([a, b_filled], axis=0)
def _write(text, i, j, canvas=canvas, fontsize=0.4):
rows = [x * (canvas.shape[0] // 10) for x in range(10+1)]
cols = [x * (canvas.shape[1] // 9) for x in range(9+1)]
cv2.putText(
canvas, text, (cols[j], rows[i]),
cv2.FONT_HERSHEY_SIMPLEX, fontsize, WHITE, 1)
_command = {
1: 'LEFT',
2: 'RIGHT',
3: 'STRAIGHT',
4: 'FOLLOW',
}.get(observations['command'], '???')
if 'big_cam' in observations:
fontsize = 0.8
else:
fontsize = 0.4
_write('Command: ' + _command, 1, 0, fontsize=fontsize)
_write('Velocity: %.1f' % np.linalg.norm(observations['velocity']), 2, 0, fontsize=fontsize)
_write('Steer: %.2f' % control.steer, 4, 0, fontsize=fontsize)
_write('Throttle: %.2f' % control.throttle, 5, 0, fontsize=fontsize)
_write('Brake: %.1f' % control.brake, 6, 0, fontsize=fontsize)
_write('Collided: %s' % diagnostic['collided'], 1, 6, fontsize=fontsize)
_write('Invaded: %s' % diagnostic['invaded'], 2, 6, fontsize=fontsize)
_write('Lights Ran: %d/%d' % (env.traffic_tracker.total_lights_ran, env.traffic_tracker.total_lights), 3, 6, fontsize=fontsize)
_write('Goal: %.1f' % diagnostic['distance_to_goal'], 4, 6, fontsize=fontsize)
_write('Time: %d' % env._tick, 5, 6, fontsize=fontsize)
_write('Time limit: %d' % env._timeout, 6, 6, fontsize=fontsize)
_write('FPS: %.2f' % (env._tick / (diagnostic['wall'])), 7, 6, fontsize=fontsize)
for x, y in debug.get('locations', []):
x = int(X - x / 2.0 * CROP_SIZE)
y = int(Y + y / 2.0 * CROP_SIZE)
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_world', []):
x = int(X - x * 4)
y = int(Y + y * 4)
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_birdview', []):
S = R // 2
birdview[x-S:x+S+1,y-S:y+S+1] = RED
for x, y in debug.get('locations_pixel', []):
S = R // 2
if 'big_cam' in observations:
rgb[y-S:y+S+1,x-S:x+S+1] = RED
else:
canvas[y-S:y+S+1,x-S:x+S+1] = RED
for x, y in debug.get('curve', []):
x = int(X - x * 4)
y = int(Y + y * 4)
try:
birdview[x,y] = [155, 0, 155]
except:
pass
if 'target' in debug:
x, y = debug['target'][:2]
x = int(X - x * 4)
y = int(Y + y * 4)
birdview[x-R:x+R+1,y-R:y+R+1] = [0, 155, 155]
#ox, oy = observations['orientation']
#rot = np.array([
# [ox, oy],
# [-oy, ox]])
#u = observations['node'] - observations['position'][:2]
#v = observations['next'] - observations['position'][:2]
#u = rot.dot(u)
#x, y = u
#x = int(X - x * 4)
#y = int(Y + y * 4)
#v = rot.dot(v)
#x, y = v
#x = int(X - x * 4)
#y = int(Y + y * 4)
if 'big_cam' in observations:
_write('Network input/output', 1, 0, canvas=rgb)
_write('Projected output', 1, 0, canvas=birdview)
full = _stick_together(rgb, birdview)
else:
full = _stick_together(canvas, birdview)
if 'image' in debug:
full = _stick_together(full, cu.visualize_predicted_birdview(debug['image'], 0.01))
if 'big_cam' in observations:
full = _stick_together(canvas, full, axis=0)
if use_cv:
semseg = get_segmentation_tensor(observations["semseg"].copy(), classes=DEFAULT_CLASSES)
class_colors = get_segmentation_colors(len(DEFAULT_CLASSES) + 1, class_indxs=DEFAULT_CLASSES)
semseg_rgb = get_rgb_segmentation(semantic_image=semseg, class_colors=class_colors)
semseg_rgb = np.uint8(semseg_rgb)
full = _stick_together_and_fill(full, semseg_rgb)
depth = np.uint8(observations["depth"]).copy()
depth = np.expand_dims(depth, axis=2)
depth = np.repeat(depth, 3, axis=2)
full = _stick_together_and_fill(full, depth)
if trained_cv:
semseg = observations["semseg"].copy()
class_colors = get_segmentation_colors(len(DEFAULT_CLASSES) + 1, class_indxs=DEFAULT_CLASSES)
semseg_rgb = get_rgb_segmentation(semantic_image=semseg, class_colors=class_colors)
semseg_rgb = np.uint8(semseg_rgb)
full = _stick_together_and_fill(full, semseg_rgb)
depth = cv2.normalize(observations["depth"].copy(), None, alpha=0, beta=255,
norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
depth = np.uint8(depth)
depth = np.expand_dims(depth, axis=2)
depth = np.repeat(depth, 3, axis=2)
full = _stick_together_and_fill(full, depth)
if show:
bzu.show_image('canvas', full)
bzu.add_to_video(full)
manual_break = False
def run_single(env, weather, start, target, agent_maker, seed, autopilot, show=False, move_camera=False,
use_cv=False, trained_cv=False):
# HACK: deterministic vehicle spawns.
env.seed = seed
env.init(start=start, target=target, weather=cu.PRESET_WEATHERS[weather])
print("Spawn points: ", (start, target))
if not autopilot:
agent = agent_maker()
else:
agent = agent_maker(env._player, resolution=1, threshold=7.5)
agent.set_route(env._start_pose.location, env._target_pose.location)
diagnostics = list()
result = {
'weather': weather,
'start': start, 'target': target,
'success': None, 't': None,
'total_lights_ran': None,
'total_lights': None,
'collided': None,
}
i = 0
listener = keyboard.Listener(on_release=on_release)
listener.start()
while env.tick():
if i % 50 == 0 and move_camera:
env.move_spectator_to_player()
i = 0 if not move_camera else i + 1
observations = env.get_observations()
if autopilot:
control, _, _, _ = agent.run_step(observations)
else:
control = agent.run_step(observations)
diagnostic = env.apply_control(control)
_paint(observations, control, diagnostic, agent.debug, env, show=show, use_cv=use_cv, trained_cv=trained_cv)
diagnostic.pop('viz_img')
diagnostics.append(diagnostic)
global manual_break
if env.is_failure() or env.is_success() or manual_break:
result['success'] = env.is_success()
result['total_lights_ran'] = env.traffic_tracker.total_lights_ran
result['total_lights'] = env.traffic_tracker.total_lights
result['collided'] = env.collided
result['t'] = env._tick
if manual_break:
print("Manual break activated")
result['success'] = False
manual_break = False
if not result['success']:
print("Evaluation route failed! Start: {}, Target: {}, Weather: {}".format(result["start"],
result["target"],
result["weather"]))
break
listener.stop()
return result, diagnostics
def on_release(key):
#print('{0} released'.format(key))
if key == keyboard.Key.page_down:
#print("pgdown pressed")
global manual_break
manual_break = True
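# Added note (not in the original file): pressing Page Down flips the module-level
# manual_break flag, which run_single() checks each tick so an evaluation route can be
# aborted by hand and recorded as a failure.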
def run_benchmark(agent_maker, env, benchmark_dir, seed, autopilot, resume, max_run=5, show=False, move_camera=False,
use_cv=False, trained_cv=False):
"""
benchmark_dir must be an instance of pathlib.Path
"""
summary_csv = benchmark_dir / 'summary.csv'
diagnostics_dir = benchmark_dir / 'diagnostics'
diagnostics_dir.mkdir(parents=True, exist_ok=True)
summary = list()
total = len(list(env.all_tasks))
if summary_csv.exists() and resume:
summary = pd.read_csv(summary_csv)
else:
summary = pd.DataFrame()
num_run = 0
for weather, (start, target), run_name in tqdm.tqdm(env.all_tasks, initial=1, total=total):
if resume and len(summary) > 0 and ((summary['start'] == start) \
& (summary['target'] == target) \
& (summary['weather'] == weather)).any():
print (weather, start, target)
continue
diagnostics_csv = str(diagnostics_dir / ('%s.csv' % run_name))
bzu.init_video(save_dir=str(benchmark_dir / 'videos'), save_path=run_name)
result, diagnostics = run_single(env, weather, start, target, agent_maker, seed, autopilot, show=show,
move_camera=move_camera, use_cv=use_cv, trained_cv=trained_cv)
summary = summary.append(result, ignore_index=True)
# Do this every timestep just in case.
| pd.DataFrame(summary) | pandas.DataFrame |
#!/usr/bin/env python
from __future__ import print_function
import warnings
import pandas as pd
from tabulate import tabulate
from matplotlib import pyplot as plt
import matplotlib
import numpy as np
import cPickle
######################################
warnings.filterwarnings('ignore')
pd.options.display.max_columns = 100
matplotlib.style.use('ggplot')
pd.options.display.max_rows = 100
######################################
train = pd.read_csv('../misc/data/train.csv')
test = pd.read_csv('../misc/data/test.csv')
# Prints the head of data prettily :)
# print(tabulate(train.head(), headers='keys', tablefmt='psql'))
# Describes the data stats
# print(tabulate(train.describe(), headers='keys', tablefmt='psql'))
# Imputing 'Age' column with median values
train['Age'].fillna(train['Age'].median(), inplace=True)
surv_sex = train[train['Survived'] == 1]['Sex'].value_counts()
dead_sex = train[train['Survived'] == 0]['Sex'].value_counts()
# Create graph for SurvivalRate w.r.t Gender
# df = pd.DataFrame([surv_sex, dead_sex])
# df.index = ['Survived', 'Dead']
# df.plot(kind='bar', stacked=True, figsize=(15, 8))
# plt.show()
surv_age = train[train['Survived'] == 1]['Age']
dead_age = train[train['Survived'] == 0]['Age']
# In order to tabulate a 1D array,
# reshape the array into 2D array as
# tabulate only allows 2D arrays as input
# surv_age = np.reshape(surv_age, (-1, 1))
# print(tabulate(surv_age[:20, :], headers='keys', tablefmt='psql'))
# Create a graph for SurvivalRate w.r.t Age
# plt.hist([surv_age, dead_age], stacked=True, color=['g', 'r'], bins=30, label=['Survived', 'Dead'])
# plt.xlabel('Age')
# plt.ylabel('Number of Passengers')
# plt.legend()
# plt.show()
surv_fare = train[train['Survived'] == 1]['Fare']
dead_fare = train[train['Survived'] == 0]['Fare']
# Create a graph for SurvivalRate w.r.t Fare
# plt.hist([surv_fare, dead_fare], stacked=True, color=['g', 'r'], bins=30, label=['Survived', 'Dead'])
# plt.xlabel('Fare')
# plt.ylabel('Number of Passengers')
# plt.legend()
# plt.show()
# Graph
# plt.figure(figsize=(15, 8))
# ax = plt.subplot()
# ax.scatter(surv_age, surv_fare, c='green', s=40)
# ax.scatter(dead_age, dead_fare, c='red', s=40)
# Graph
# ax.set_xlabel('Age')
# ax.set_ylabel('Fare')
# ax.legend(('survived', 'dead'), scatterpoints=1, loc='upper right', fontsize=15)
# plt.show()
# Graph
# ax = plt.subplot()
# ax.set_ylabel('Average Fare')
# train.groupby('Pclass').mean()['Fare'].plot(kind='bar', figsize=(15, 8), ax=ax)
# plt.show()
surv_embark = train[train['Survived'] == 1]['Embarked'].value_counts()
dead_embark = train[train['Survived'] == 0]['Embarked'].value_counts()
# Create a graph for SurvivalRate w.r.t EmbarkedPosition
# df = pd.DataFrame([surv_embark, dead_embark])
# df.index = ['Survived', 'Dead']
# df.plot(kind='bar', stacked=True, figsize=(15, 8))
# plt.show()
def status(feature):
print('processing', feature, ': OK')
# Feature Engineering
def getCombinedData():
test = pd.read_csv('../misc/data/test.csv')
train = pd.read_csv('../misc/data/train.csv')
# Extracting, then removing targets from training data
targets = train.Survived
train.drop('Survived', 1, inplace=True)
# merging train and test data for feature engineering
combined = train.append(test)
combined.reset_index(inplace=True)
combined.drop('index', inplace=True, axis=1)
return combined
combined = getCombinedData()
# pretty-print combined data
# print(combined.shape)
# print(tabulate(combined.describe(), headers='keys', tablefmt='psql'))
# print(tabulate(combined[:100][:], headers='keys', tablefmt='psql'))
def getTitles():
global combined
# extract title from each name
combined['Title'] = combined['Name'].map(lambda name:name.split(',')[1].split('.')[0].strip())
# mapping titles
Title_Dictionary = {
"Capt": "Officer",
"Col": "Officer",
"Major": "Officer",
"Jonkheer": "Royalty",
"Don": "Royalty",
"Sir" : "Royalty",
"Dr": "Officer",
"Rev": "Officer",
"the Countess": "Royalty",
"Dona": "Royalty",
"Mme": "Mrs",
"Mlle": "Miss",
"Ms": "Mrs",
"Mr" : "Mr",
"Mrs" : "Mrs",
"Miss" : "Miss",
"Master" : "Master",
"Lady" : "Royalty"
}
# mapping title to dictionary_val
combined['Title'] = combined.Title.map(Title_Dictionary)
getTitles()
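# Hedged example (added, not in the original script): the split-based extraction above
# maps a raw name such as 'Braund, Mr. Owen Harris' to the title 'Mr', i.e.
# 'Braund, Mr. Owen Harris'.split(',')[1].split('.')[0].strip() == 'Mr',
# which Title_Dictionary then keeps as 'Mr'.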
# pretty-print combined data
# print(combined.shape)
# print(tabulate(combined.describe(), headers='keys', tablefmt='psql'))
# print(tabulate(combined[:100][:], headers='keys', tablefmt='psql'))
# imputing 'Age' values according to the section the person belongs to,
# instead of taking the overall median of the column
# in order to understand the reason for this method,
# run the following commands:
#####################################################################
# features = ['Sex', 'Pclass', 'Title']
# grouped = combined.groupby(features)
# print(tabulate(grouped.median(), headers='keys', tablefmt='psql'))
#####################################################################
# notice that different sections of people [differentiated by `features`]
# have different medians of age
def processAge():
global combined
def fillAges(row):
if row['Sex']=='female' and row['Pclass'] == 1:
if row['Title'] == 'Miss':
return 30
elif row['Title'] == 'Mrs':
return 45
elif row['Title'] == 'Officer':
return 49
elif row['Title'] == 'Royalty':
return 39
elif row['Sex']=='female' and row['Pclass'] == 2:
if row['Title'] == 'Miss':
return 20
elif row['Title'] == 'Mrs':
return 30
elif row['Sex']=='female' and row['Pclass'] == 3:
if row['Title'] == 'Miss':
return 18
elif row['Title'] == 'Mrs':
return 31
elif row['Sex']=='male' and row['Pclass'] == 1:
if row['Title'] == 'Master':
return 6
elif row['Title'] == 'Mr':
return 41.5
elif row['Title'] == 'Officer':
return 52
elif row['Title'] == 'Royalty':
return 40
elif row['Sex']=='male' and row['Pclass'] == 2:
if row['Title'] == 'Master':
return 2
elif row['Title'] == 'Mr':
return 30
elif row['Title'] == 'Officer':
return 41.5
elif row['Sex']=='male' and row['Pclass'] == 3:
if row['Title'] == 'Master':
return 6
elif row['Title'] == 'Mr':
return 26
combined.Age = combined.apply(lambda r: fillAges(r) if np.isnan(r['Age']) else r['Age'], axis=1)
status('age')
processAge()
# print(combined.info())
def processNames():
global combined
# clean-up of `Name` variable
combined.drop('Name', axis=1, inplace=True)
titles_dummies = pd.get_dummies(combined['Title'], prefix='Title')
combined = pd.concat([combined, titles_dummies], axis=1)
combined.drop('Title', axis=1, inplace=True)
status('names')
processNames()
# print(tabulate(combined.head(), headers='keys', tablefmt='psql'))
def processFares():
global combined
combined.Fare.fillna(combined.Fare.mean(), inplace=True)
status('fare')
processFares()
def processEmbarked():
global combined
# two missing embarked values - filling them with the most frequent one (S)
combined.Embarked.fillna('S',inplace=True)
# dummy encoding
embarked_dummies = pd.get_dummies(combined['Embarked'],prefix='Embarked')
combined = pd.concat([combined,embarked_dummies],axis=1)
combined.drop('Embarked',axis=1,inplace=True)
status('embarked')
processEmbarked()
def processCabin():
global combined
    # replacing missing cabins with U (for Unknown)
combined.Cabin.fillna('U',inplace=True)
# mapping each Cabin value with the cabin letter
combined['Cabin'] = combined['Cabin'].map(lambda c : c[0])
# dummy encoding ...
cabin_dummies = | pd.get_dummies(combined['Cabin'],prefix='Cabin') | pandas.get_dummies |
#!/usr/bin/env python3
"""Parse Postgres log to retrieve the dataset ids and the IP of the API users."""
import argparse
import glob
import gzip
import ipaddress
import json
import logging
import os
from urllib.parse import unquote
import dateutil
import pandas as pd
import sqlalchemy as sqla
# CONSTANTS
logging.basicConfig(level=logging.INFO)
# Limit to these postgrest queries
QUERY_STRINGS = ["enermaps_get_legend"]
# Limit to these caddy's URIS
URIs = [
"/enermaps/api/datasets/legend/",
"/enermaps/api/db/rpc/enermaps_get_legend?",
"/enermaps/api/db/rpc/enermaps_query_geojson?",
"/enermaps/api/db/rpc/enermaps_query_table?",
]
BASE_PATH_PG = "/stats/pg-logs/"
BASE_PATH_CADDY = "/stats/caddy-logs/"
SEL_COLS = ["timestamp", "ds_id", "country", "function", "json_query", "source"]
DB_HOST = os.environ.get("DB_HOST")
DB_PORT = os.environ.get("DB_PORT")
DB_USER = os.environ.get("DB_USER")
DB_PASSWORD = os.environ.get("DB_PASSWORD")
DB_DB = os.environ.get("DB_DB")
DB_URL = "postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_DB}".format(
DB_HOST=DB_HOST,
DB_PORT=DB_PORT,
DB_USER=DB_USER,
DB_PASSWORD=DB_PASSWORD,
DB_DB=DB_DB,
)
LOCAL_TZ = "Europe/Zurich"
tzmapping = {
"CET": dateutil.tz.gettz("Europe/Zurich"),
"CEST": dateutil.tz.gettz("Europe/Zurich"),
}
def getCountry(log: pd.DataFrame):
"""Geolocate ip address."""
def findCountry(ip, db, information="code"):
try:
ip = int(ipaddress.ip_address(ip))
country = db.loc[(db["start"] < ip) & (db["end"] > ip), information].values[
0
]
except ValueError:
country = None
return country
db = pd.read_csv("IP2LOCATION-LITE-DB1/IP2LOCATION-LITE-DB1.CSV", header=None)
db.columns = ["start", "end", "code", "country"]
log["country"] = ""
log["country"] = log["ip"].apply(lambda x: findCountry(x, db))
log["country"] = log["country"].replace({"-": None})
return log
def safelyJSONdecode(s: str):
"Decode json-like string to dict, or keep it as a raw string."
try:
js = json.loads(s)
except (json.decoder.JSONDecodeError, TypeError):
js = {"raw_string": r"{}".format(s)}
return js
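# Hedged examples (added): safelyJSONdecode('{"a": 1}') returns {'a': 1}, while a
# non-JSON payload such as safelyJSONdecode('key=value') falls back to
# {'raw_string': 'key=value'}.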
def parseCADDYlog(log_file: str):
"""Parse the original caddy log file."""
if log_file.endswith("gz"):
with gzip.open(log_file, "rb") as f:
dicts = f.read().splitlines()
else:
with open(log_file, "r") as f:
dicts = f.read().splitlines()
dicts = [json.loads(x) for x in dicts]
log = | pd.DataFrame.from_records(dicts) | pandas.DataFrame.from_records |
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
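# Added note (not part of the test suite): mock_bioframe(n) returns an n-row,
# bedframe-like DataFrame with columns ['chrom', 'start', 'end'], chromosomes drawn
# from chroms above, and rows sorted by ('chrom', 'start').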
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert ~df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
        how="inner",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test keep_order incompatible if how!= 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=none
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
### test complement with no view_df and a negative interval
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_coverage, bioframe.coverage(df1, df2))
### coverage without return_input returns a single column dataFrame
assert (
bioframe.coverage(df1, df2, return_input=False)["coverage"].values
== np.array([0, 0, 5, 0])
).all()
def test_subtract():
### no intervals should be left after self-subtraction
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
assert len(bioframe.subtract(df1, df1)) == 0
### no intervals on chrX should remain after subtracting a longer interval
### interval on chr1 should be split.
### additional column should be propagated to children.
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 5, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### no intervals on chrX should remain after subtracting a longer interval
df2 = pd.DataFrame(
[["chrX", 0, 4], ["chr1", 6, 6], ["chrX", 4, 9]],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 6, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
    ### subtracting dataframes with funny column names
funny_cols = ["C", "chromStart", "chromStop"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=funny_cols,
)
df1["strand"] = "+"
assert len(bioframe.subtract(df1, df1, cols1=funny_cols, cols2=funny_cols)) == 0
funny_cols2 = ["chr", "st", "e"]
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=funny_cols2,
)
df_result = pd.DataFrame(
[["chr1", 4, 5, "+"], ["chr1", 6, 7, "+"]],
columns=funny_cols + ["strand"],
)
df_result = df_result.astype(
{funny_cols[1]: pd.Int64Dtype(), funny_cols[2]: pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2, cols1=funny_cols, cols2=funny_cols2)
.sort_values(funny_cols)
.reset_index(drop=True),
)
# subtract should ignore empty groups
df1 = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 1, 8],
],
columns=["chrom", "start", "end"],
)
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df1 = df1.astype({"chrom": df_cat})
df_subtracted = pd.DataFrame(
[
["chrX", 8, 10],
],
columns=["chrom", "start", "end"],
)
assert bioframe.subtract(df1, df1).empty
pd.testing.assert_frame_equal(
df_subtracted.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2),
check_dtype=False,
check_categorical=False,
)
## test transferred from deprecated bioframe.split
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 4],
["chr1", 5],
],
columns=["chrom", "pos"],
)
df2["start"] = df2["pos"]
df2["end"] = df2["pos"]
df_result = (
pd.DataFrame(
[
["chrX", 1, 4],
["chrX", 3, 4],
["chrX", 4, 5],
["chrX", 4, 8],
["chr1", 5, 7],
["chr1", 4, 5],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# Test the case when a chromosome should not be split (now implemented with subtract)
df1 = pd.DataFrame(
[
["chrX", 3, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame([["chrX", 4]], columns=["chrom", "pos"])
df2["start"] = df2["pos"].values
df2["end"] = df2["pos"].values
df_result = (
pd.DataFrame(
[
["chrX", 3, 4],
["chrX", 4, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# subtract should ignore null rows
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 1, 5]],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
["chrX", 1, 5],
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_subtracted = pd.DataFrame(
[
["chr1", 1, 4],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_subtracted, bioframe.subtract(df1, df2))
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert len(bioframe.subtract(df1, df2)) == 0 # empty df1 but valid chroms in df2
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df1)
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df2)
def test_setdiff():
cols1 = ["chrom1", "start", "end"]
cols2 = ["chrom2", "start", "end"]
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=cols1 + ["strand", "animal"],
)
df2 = pd.DataFrame(
[
["chrX", 7, 10, "-", "dog"],
["chr1", 6, 10, "-", "cat"],
["chr1", 6, 10, "-", "cat"],
],
columns=cols2 + ["strand", "animal"],
)
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=None,
)
)
== 0
) # everything overlaps
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["animal"],
)
)
== 1
) # two overlap, one remains
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["strand"],
)
)
== 2
) # one overlaps, two remain
# setdiff should ignore nan rows
df1 = pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])[
["chrom1", "start", "end", "strand", "animal"]
]
df1 = df1.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
df2 = pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])[
["chrom2", "start", "end", "strand", "animal"]
]
df2 = df2.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
assert (2, 5) == np.shape(bioframe.setdiff(df1, df1, cols1=cols1, cols2=cols1))
assert (2, 5) == np.shape(bioframe.setdiff(df1, df2, cols1=cols1, cols2=cols2))
assert (4, 5) == np.shape(
bioframe.setdiff(df1, df2, on=["strand"], cols1=cols1, cols2=cols2)
)
def test_count_overlaps():
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[
["chr1", 6, 10, "+", "dog"],
["chr1", 6, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
assert (
bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 2, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 0, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand", "animal"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([0, 0, 0])
).all()
# overlaps with pd.NA
counts_no_nans = bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
df1_na = (pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)[["chrom1", "start", "end", "strand", "animal"]]
df2_na = (pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])).astype(
{
"start2": pd.Int64Dtype(),
"end2": pd.Int64Dtype(),
}
)[["chrom2", "start2", "end2", "strand", "animal"]]
counts_nans_inserted_after = (
pd.concat([pd.DataFrame([pd.NA]), counts_no_nans, pd.DataFrame([pd.NA])])
    ).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pandas as pd
import os
import shutil
import time
from PIL import Image
pictures_dir = 'D:/Libraries/Documents/Projects/Jenna Paintings/'
def get_data():
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'gcloud_connection/perfect-mess-paints-website-ba51dd8e2002.json', scope) # Your json file here
gc = gspread.authorize(credentials)
wks = gc.open("Perfect Mess Paints").sheet1
data = wks.get_all_values()
headers = data.pop(2)
    df = pd.DataFrame(data[2:], columns=headers)
# Script which helps to plot Figures 3A and 3B
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Include all GENES, both those containing indels and those with SNVs (that's why the "alleles" dataframe is reloaded here). This prevents bad grouping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
dff = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~IMPORTS~~~~~~~~~~~~~~ #
# Standard library imports
from collections import *
# Third party imports
import pysam
import pandas as pd
from tqdm import tqdm
# Local imports
from NanoCount.Read import Read
from NanoCount.common import *
# ~~~~~~~~~~~~~~MAIN FUNCTION~~~~~~~~~~~~~~ #
class NanoCount:
# ~~~~~~~~~~~~~~MAGIC METHODS~~~~~~~~~~~~~~ #
def __init__(
self,
alignment_file: str,
count_file: str = "",
filter_bam_out: str = "",
min_alignment_length: int = 50,
keep_suplementary: bool = False,
min_query_fraction_aligned: float = 0.5,
sec_scoring_threshold: float = 0.95,
sec_scoring_value: str = "alignment_score",
convergence_target: float = 0.005,
max_em_rounds: int = 100,
extra_tx_info: bool = False,
primary_score: str = "alignment_score",
max_dist_3_prime: int = 50,
max_dist_5_prime: int = -1,
verbose: bool = False,
quiet: bool = False,
):
"""
Estimate abundance of transcripts using an EM
* alignment_file
Sorted and indexed BAM or SAM file containing aligned ONT dRNA-Seq reads including secondary alignments
* count_file
Output file path where to write estimated counts (TSV format)
* filter_bam_out
Optional output file path where to write filtered reads selected by NanoCount to perform quantification estimation (BAM format)
* min_alignment_length
Minimal length of the alignment to be considered valid
* min_query_fraction_aligned
Minimal fraction of the primary alignment query aligned to consider the read valid
* sec_scoring_threshold
            Fraction of the primary alignment's score or length that a secondary alignment must reach to be considered valid
* sec_scoring_value
Value to use for score thresholding of secondary alignments either "alignment_score" or "alignment_length"
* convergence_target
            Convergence target value of the cumulative difference between abundance values of successive EM rounds that triggers the end of the EM loop.
* max_em_rounds
Maximum number of EM rounds before triggering stop
* extra_tx_info
Add transcripts length and zero coverage transcripts to the output file (required valid bam/sam header)
* primary_score
Method to pick the best alignment for each read. By default ("alignment_score") uses the best alignment score (AS optional field), but it can be changed to
use either the primary alignment defined by the aligner ("primary") or the longest alignment ("alignment_length"). choices = [primary, alignment_score, alignment_length]
* keep_suplementary
            Retain any supplementary alignments and consider them like secondary alignments. Discarded by default.
* max_dist_3_prime
            Maximum distance of the alignment end to the 3 prime end of the transcript. In ONT dRNA-Seq, reads are assumed to start from the polyA tail (-1 to deactivate).
* max_dist_5_prime
            Maximum distance of the alignment start to the 5 prime end of the transcript. In conjunction with max_dist_3_prime it can be used to select only near full-length transcript reads (-1 to deactivate).
* verbose
Increase verbosity for QC and debugging
* quiet
Reduce verbosity
"""
# Init package
opt_summary_dict = opt_summary(local_opt=locals())
self.log = get_logger(name="Nanocount", verbose=verbose, quiet=quiet)
self.log.warning("Checking options and input files")
log_dict(opt_summary_dict, self.log.debug, "Options summary")
# Save args in self variables
self.alignment_file = alignment_file
self.count_file = count_file
self.filter_bam_out = filter_bam_out
self.min_alignment_length = min_alignment_length
self.min_query_fraction_aligned = min_query_fraction_aligned
self.sec_scoring_threshold = sec_scoring_threshold
self.sec_scoring_value = sec_scoring_value
self.convergence_target = convergence_target
self.max_em_rounds = max_em_rounds
self.extra_tx_info = extra_tx_info
self.primary_score = primary_score
self.keep_suplementary = keep_suplementary
self.max_dist_5_prime = max_dist_5_prime
self.max_dist_3_prime = max_dist_3_prime
self.log.warning("Initialise Nanocount")
# Collect all alignments grouped by read name
self.log.info("Parse Bam file and filter low quality alignments")
self.read_dict = self._parse_bam()
if self.filter_bam_out:
self.log.info("Write selected alignments to BAM file")
self._write_bam()
# Generate compatibility dict grouped by reads
self.log.info("Generate initial read/transcript compatibility index")
self.compatibility_dict = self._get_compatibility()
# EM loop to calculate abundance and update read-transcript compatibility
self.log.warning("Start EM abundance estimate")
self.em_round = 0
self.convergence = 1
with tqdm(
unit=" rounds",
unit_scale=True,
desc="\tProgress",
disable=(quiet or verbose),
) as pbar:
# Iterate until convergence threshold or max EM round are reached
while self.convergence > self.convergence_target and self.em_round < self.max_em_rounds:
self.em_round += 1
# Calculate abundance from compatibility assignments
self.abundance_dict = self._calculate_abundance()
# Update compatibility assignments
self.compatibility_dict = self._update_compatibility()
# Update counter
pbar.update(1)
self.log.debug("EM Round: {} / Convergence value: {}".format(self.em_round, self.convergence))
self.log.info("Exit EM loop after {} rounds".format(self.em_round))
self.log.info("Convergence value: {}".format(self.convergence))
if not self.convergence <= self.convergence_target:
self.log.error("Convergence target ({}) could not be reached after {} rounds".format(self.convergence_target, self.max_em_rounds))
# Write out results
self.log.warning("Summarize data")
self.log.info("Convert results to dataframe")
self.count_df = pd.DataFrame(self.abundance_dict.most_common(), columns=["transcript_name", "raw"])
self.count_df.set_index("transcript_name", inplace=True, drop=True)
self.log.info("Compute estimated counts and TPM")
self.count_df["est_count"] = self.count_df["raw"] * len(self.read_dict)
self.count_df["tpm"] = self.count_df["raw"] * 1000000
        # Add extra transcript info if required
if self.extra_tx_info:
tx_df = self._get_tx_df()
self.count_df = pd.merge(self.count_df, tx_df, left_index=True, right_index=True, how="outer")
# Cleanup and sort
self.count_df.sort_values(by="raw", ascending=False, inplace=True)
self.count_df.fillna(value=0, inplace=True)
self.count_df.index.name = "transcript_name"
if self.count_file:
self.log.info("Write file")
self.count_df.to_csv(self.count_file, sep="\t")
# ~~~~~~~~~~~~~~PRIVATE METHODS~~~~~~~~~~~~~~ #
def _parse_bam(self):
"""
Parse Bam/Sam file, group alignments per reads, filter reads based on
selection criteria and return a dict of valid read/alignments
"""
# Parse bam files
read_dict = defaultdict(Read)
ref_len_dict = OrderedDict()
c = Counter()
with pysam.AlignmentFile(self.alignment_file) as bam:
# Collect reference lengths in dict
for name, length in zip(bam.references, bam.lengths):
ref_len_dict[name] = length
for idx, alignment in enumerate(bam):
if alignment.is_unmapped:
c["Discarded unmapped alignments"] += 1
elif alignment.is_reverse:
c["Discarded negative strand alignments"] += 1
elif not self.keep_suplementary and alignment.is_supplementary:
c["Discarded supplementary alignments"] += 1
elif self.min_alignment_length > 0 and alignment.query_alignment_length < self.min_alignment_length:
c["Discarded short alignments"] += 1
elif self.max_dist_3_prime >= 0 and alignment.reference_end <= ref_len_dict[alignment.reference_name] - self.max_dist_3_prime:
c["Discarded alignment with invalid 3 prime end"] += 1
elif self.max_dist_5_prime >= 0 and alignment.reference_start >= self.max_dist_5_prime:
c["Discarded alignment with invalid 5 prime end"] += 1
else:
c["Valid alignments"] += 1
read_dict[alignment.query_name].add_pysam_alignment(pysam_aligned_segment=alignment, read_idx=idx)
# Write filtered reads counters
log_dict(
d=c,
logger=self.log.info,
header="Summary of alignments parsed in input bam file",
)
# Filter alignments
filtered_read_dict = defaultdict(Read)
c = Counter()
for query_name, read in read_dict.items():
# Check if best alignment is valid
best_alignment = read.get_best_alignment(primary_score=self.primary_score)
# In case the primary alignment was removed by filters
if best_alignment:
if best_alignment.align_score == 0:
c["Reads with zero score"] += 1
elif best_alignment.align_len == 0:
c["Reads with zero len"] += 1
elif best_alignment.query_fraction_aligned < self.min_query_fraction_aligned:
c["Reads with low query fraction aligned"] += 1
else:
filtered_read_dict[query_name].add_alignment(best_alignment)
c["Reads with valid best alignment"] += 1
for alignment in read.get_secondary_alignments_list(primary_score=self.primary_score):
# Filter out secondary alignments based on minimap alignment score
if self.sec_scoring_value == "alignment_score" and alignment.align_score / best_alignment.align_score < self.sec_scoring_threshold:
c["Invalid secondary alignments"] += 1
# Filter out secondary alignments based on minimap alignment length
elif self.sec_scoring_value == "alignment_length" and alignment.align_len / best_alignment.align_len < self.sec_scoring_threshold:
c["Invalid secondary alignments"] += 1
# Select valid secondary alignments
else:
c["Valid secondary alignments"] += 1
filtered_read_dict[query_name].add_alignment(alignment)
else:
c["Reads without best alignment"] += 1
        if "Valid secondary alignments" not in c:
self.log.error("No valid secondary alignments found in bam file. Were the reads aligned with minimap `-p 0 -N 10` options ?")
# Write filtered reads counters
log_dict(d=c, logger=self.log.info, header="Summary of reads filtered")
return filtered_read_dict
    def _write_bam(self):
        """Write the alignments selected by NanoCount to the filtered BAM output file."""
c = Counter()
# Make list of alignments idx to select
selected_read_idx = set()
for read in self.read_dict.values():
for alignment in read.alignment_list:
selected_read_idx.add(alignment.read_idx)
c["Alignments to select"] += 1
# Select from original bam file and write to output bam file
with pysam.AlignmentFile(self.alignment_file) as bam_in:
with pysam.AlignmentFile(self.filter_bam_out, "wb", template=bam_in) as bam_out:
for read_idx, alignment in enumerate(bam_in):
if read_idx in selected_read_idx:
bam_out.write(alignment)
c["Alignments written"] += 1
else:
c["Alignments skipped"] += 1
log_dict(d=c, logger=self.log.info, header="Summary of alignments written to bam")
    def _get_compatibility(self):
        """Build the initial read/transcript compatibility scores, uniform across each read's alignments."""
compatibility_dict = defaultdict(dict)
for read_name, read in self.read_dict.items():
for alignment in read.alignment_list:
                compatibility_dict[read_name][alignment.rname] = 1.0 / read.n_alignment
return compatibility_dict
def _calculate_abundance(self):
"""
Calculate the abundance of the transcript set based on read-transcript compatibilities
"""
abundance_dict = Counter()
total = 0
convergence = 0
for read_name, comp in self.compatibility_dict.items():
for ref_name, score in comp.items():
abundance_dict[ref_name] += score
total += score
for ref_name in abundance_dict.keys():
abundance_dict[ref_name] = abundance_dict[ref_name] / total
if self.em_round > 1:
convergence += abs(self.abundance_dict[ref_name] - abundance_dict[ref_name])
if self.em_round == 1:
self.convergence = 1
else:
self.convergence = convergence
return abundance_dict
def _update_compatibility(self):
"""
Update read-transcript compatibility based on transcript abundances
"""
compatibility_dict = defaultdict(dict)
for read_name, comp in self.compatibility_dict.items():
total = 0
for ref_name in comp.keys():
total += self.abundance_dict[ref_name]
for ref_name in comp.keys():
compatibility_dict[read_name][ref_name] = self.abundance_dict[ref_name] / total
return compatibility_dict
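    # Illustrative note (not part of the original NanoCount code; the numbers
    # are assumed): suppose a read is compatible with transcripts T1 and T2,
    # so _get_compatibility() scores each 1/2 = 0.5. If _calculate_abundance()
    # then yields {T1: 0.8, T2: 0.2}, _update_compatibility() rescales that
    # read's scores to T1 = 0.8 / (0.8 + 0.2) = 0.8 and T2 = 0.2, so the next
    # abundance estimate weighs T1 more heavily; the loop repeats until the
    # cumulative change falls below convergence_target.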
def _get_tx_df(self):
"""
Extract transcript info from bam file header
"""
try:
with pysam.AlignmentFile(self.alignment_file) as bam:
references = bam.references
lengths = bam.lengths
return pd.DataFrame(index=references, data=lengths, columns=["transcript_length"])
# If any error return empty DataFrame silently
except Exception:
            return pd.DataFrame()
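# Usage sketch (not part of the original module; the file names below are
# placeholders): quantify a sorted, indexed BAM and return the counts table.
def _example_quantification(bam_path="aligned_reads.bam",
                            out_path="transcript_counts.tsv"):
    """Run the EM quantification on a hypothetical BAM and return count_df."""
    nc = NanoCount(alignment_file=bam_path, count_file=out_path, extra_tx_info=True)
    return nc.count_df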
"""Provides functions to load entire benchmark result datasets
"""
import os
import io
import glob
import gzip
import tarfile
import warnings
import numpy
import pandas
from .parse import IorOutput, MdWorkbenchOutput
from .contention import validate_contention_dataset, JobOverlapError, ShortJobError
def _load_ior_output_stream(stream, fname, all_results=None):
"""Recursive function that loads one or more IOR output files
Args:
stream (io.TextIOWrapper): file-like object containing the stdout of
an IOR job or jobs.
fname (str): file name associated with stream.
all_results (pandas.DataFrame or None): Dataframe to which loaded
results should be appended.
Returns:
pandas.DataFrame: all_results with newly loaded data appended
as new rows.
"""
if isinstance(stream, tarfile.TarFile):
for member in stream.getmembers():
handle = stream.extractfile(member)
if handle: # directories will have handle = None
all_results = _load_ior_output_stream(
io.TextIOWrapper(handle),
member.name,
all_results)
else:
result = IorOutput(stream, normalize_results=True)
if not result or 'results' not in result:
warnings.warn('invalid output in {}'.format(fname))
return all_results
result.add_filename_metadata(fname)
results_df = pandas.DataFrame.from_dict(result['results']).dropna(subset=['bw(mib/s)'])
results_df['filename'] = fname
# graft in some columns from summary lines - indices should be the same
summaries_df = pandas.DataFrame.from_dict(result['summaries'])
if 'aggs(mib)' in summaries_df:
if 'stonewall_bytes_moved' in results_df:
na_indices = results_df[results_df['stonewall_bytes_moved'].isna()].index
if na_indices.shape[0] > 0:
results_df.loc[na_indices, 'stonewall_bytes_moved'] = summaries_df.loc[na_indices, 'aggs(mib)'] * 2**20
else:
results_df['stonewall_bytes_moved'] = summaries_df['aggs(mib)'] * 2**20
if all_results is None:
all_results = results_df
else:
if len(all_results.columns) != len(results_df.columns):
warn_str = 'inconsistent input file: {}' + \
' (file only has {:d} of {:d} expected columns)\n' +\
'this file: {}\n' + \
'expected: {}\n' + \
'diff: {}'
warnings.warn(warn_str.format(
fname,
len(results_df.columns),
len(all_results.columns),
','.join(results_df.columns),
','.join(all_results.columns),
','.join(list(set(all_results.columns) ^ set(results_df.columns)))))
all_results = pandas.concat((all_results, results_df))
return all_results
def load_ior_output_files(input_glob):
"""Finds and loads one or more IOR output files.
Args:
input_glob (str): A path or glob to one or more IOR output files. Such
files may be ASCII files, gzipped ASCII, or tar files containing
multiple IOR output ASCII files.
Returns:
pandas.DataFrame: benchmark results from the files matching input_glob
"""
all_results = None
if isinstance(input_glob, str):
input_globs = [input_glob]
else:
input_globs = input_glob
for input_glob in input_globs:
for output_f in glob.glob(input_glob):
if output_f.endswith('.tar') or output_f.endswith('.tgz'):
stream = tarfile.open(output_f, 'r')
elif output_f.endswith('.gz'):
stream = gzip.open(output_f, 'r')
else:
stream = open(output_f, 'r')
all_results = _load_ior_output_stream(stream, output_f, all_results)
if all_results is None:
raise ValueError(f"Non-existent dataset {input_glob}")
all_results = all_results.reset_index(drop=True)
all_results['nproc'] = all_results['nodes'] * all_results['ppn']
if all_results is None:
warnings.warn(f'Found no valid results in {input_glob}!')
else:
print('Found {:d} results in {}.'.format(
all_results.shape[0], ", ".join(input_globs)))
return all_results
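# Usage sketch (not part of the original module; the glob below is a
# placeholder): load every matching IOR output and summarize bandwidth.
def _example_ior_summary(glob_pattern="results/ior_*.out"):
    """Median bandwidth per node count for a hypothetical set of IOR runs."""
    results = load_ior_output_files(glob_pattern)
    return results.groupby("nodes")["bw(mib/s)"].median()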
def load_ior_vs_setsize_results(input_glob, filter_setsizes_below_gibs=65):
"""Finds and loads IOR output files for performance-vs-setsize analysis.
Args:
input_glob (str): A path or glob to one or more IOR output files. Such
files may be ASCII files, gzipped ASCII, or tar files containing
multiple IOR output ASCII files.
filter_setsizes_below_gibs (int): Exclude measurements that had a
setsize smaller than this value (in GiBs)
Returns:
pandas.DataFrame: benchmark results from the files matching input_glob
"""
results = None
for output_f in glob.glob(input_glob):
if output_f.endswith("gz"):
records = IorOutput(gzip.open(output_f, 'r'), normalize_results=True)
else:
records = IorOutput(open(output_f, 'r'), normalize_results=True)
frame = pandas.DataFrame.from_records(records['results'])
if results is None:
results = frame
else:
results = pandas.concat((results, frame), ignore_index=True)
results['gib_moved'] = results['bw(mib/s)'] * results['total(s)'] / 1024.0
filt = (results['access'] == 'read') | (results['access'] == 'write')
filt &= results['bw(mib/s)'] > 0.0
results = results[filt].sort_values('timestamp').reset_index(drop=True).copy()
results['timestamp'] = results['timestamp'].apply(int)
tmp = results['gib_moved'].values
tmp[1::2] = results['gib_moved'].iloc[::2]
results['setsize_gib'] = tmp
results["setsize_gib_int"] = results['setsize_gib'].apply(numpy.rint).astype(numpy.int32)
filt = results['setsize_gib_int'] >= filter_setsizes_below_gibs
print('Found {:d} runs ({:d} results) in {}.'.format(
results.groupby("setsize_gib_int").count().iloc[0, 0] // 2, # /2 because 1 run = write+read
results.shape[0],
input_glob))
return results[filt].copy()
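# Usage sketch (not part of the original module; the glob below is a
# placeholder): compare bandwidth across set sizes and access modes.
def _example_setsize_scan(glob_pattern="results/setsize_*.out"):
    """Median bandwidth by integer setsize and access mode for hypothetical runs."""
    results = load_ior_vs_setsize_results(glob_pattern, filter_setsizes_below_gibs=65)
    return results.groupby(["setsize_gib_int", "access"])["bw(mib/s)"].median()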
def load_contention_dataset(dataset_glob, dataset_id=None, as_records=False):
records = []
for filename in glob.glob(dataset_glob):
record = None
for loader in IorOutput, MdWorkbenchOutput:
try:
record = loader(open(filename, "r"), normalize_results=True)['results'][0]
break
except KeyError:
pass
if record is None:
warnings.warn("{} does not contain valid output".format(os.path.basename(filename)))
continue
basename = os.path.basename(filename)
# decode job metadata from filename - new way (secondary_quiet.7p-1s.2125435.out)
if basename.startswith("primary") or basename.startswith("secondary"):
access = record.get("access")
metric = "bw"
if loader == MdWorkbenchOutput:
metric = "metadata"
access = "both"
elif record['ordering'] == 'random':
metric = "iops"
workloadid_contention, nodect, dataset_id, _ = basename.split('.')
workload_id, contention = workloadid_contention.split("_")
primary_nodes = int(nodect.split("p", 1)[0])
secondary_nodes = int(nodect.split("-", 1)[-1].split("s", 1)[0])
record['dataset_id'] = dataset_id
record['workload_id'] = workload_id
else:
# decode job metadata from filename
access_metric_contention, nodect, _, _ = basename.split('.')
access, metric, contention = access_metric_contention.split("_")
primary_nodes = int(nodect.split("b", 1)[0])
secondary_nodes = int(nodect.split("-", 1)[-1].split("i", 1)[0])
# add job metadata to record
record.update({
"primary_nodes": primary_nodes,
"secondary_nodes": secondary_nodes,
"access": access,
"metric": metric,
"contention": contention,
"workload": "{} {}".format(access, metric),
"filename": basename,
})
if dataset_id:
record.update({"dataset_id": dataset_id})
if metric == "bw":
record["performance"] = record["bw(mib/s)"]
elif metric == "iops":
record["performance"] = record["iops"]
elif metric == "metadata":
record["performance"] = record["iops"]
else:
raise ValueError(f"unknown metric {metric}")
records.append(record)
# set the primary workload - always the first to run during the quiet tests
# TODO: think about this - is it correct? shouldn't we ensure that
# primary_nodes and primary_workload are always consistent? as-written,
# the definition used here is dependent on the nature of the ordering
# within the slurm script used to generate the dataset
min_starts = {}
for record in records:
if record["contention"] != "quiet":
continue
dataset_id = record["dataset_id"]
if dataset_id not in min_starts:
min_starts[dataset_id] = {}
rec = min_starts[dataset_id]
if "min start" not in rec or rec["min start"] > record["start"]:
rec.update({
"min start": record["start"],
"primary workload": record["workload"],
})
for record in records:
record["primary_workload"] = min_starts\
.get(record.get("dataset_id"), {})\
.get("primary workload")
if 'workload_id' not in record:
if record["primary_workload"] == record["workload"]:
record["workload_id"] = "primary"
else:
record["workload_id"] = "secondary"
if not records:
raise ValueError("Invalid datasets")
if as_records:
return records
return pandas.DataFrame.from_records(records)
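# Usage sketch (not part of the original module; the glob and dataset id are
# placeholders): load one contention dataset and compare workloads by contention.
def _example_contention_summary(dataset_glob="contention/*.out", dataset_id="run01"):
    """Median performance by workload_id and contention for a hypothetical dataset."""
    df = load_contention_dataset(dataset_glob, dataset_id=dataset_id)
    return df.groupby(["workload_id", "contention"])["performance"].median()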
def load_contention_datasets(dataset_glob_map, use_cache=True, validate=True):
"""Loads contention datasets
Args:
dataset_glob_map (dict): Keyed by a path glob that contains exactly
one {} which will be substituted for dataset ids. Values should
be lists of strings, each containing a dataset id which will be
substituted within the key to resolve a set of matching IOR input
files.
use_cache (bool): Attempt to load and/or save the results to an
intermediate cache file.
Returns:
pandas.DataFrame: benchmark results from the files matching input_glob
"""
dataframe = None
new_datasets = 0
for dataset_glob, dataset_ids in dataset_glob_map.items():
if dataset_glob.startswith("_"):
continue
cache_file = None
if use_cache:
filepath = os.path.dirname(dataset_glob)
if '*' in filepath:
                warnings.warn(f"* found in {filepath}; not using cache")
else:
cache_file = os.path.join(filepath, "dataset_summary.csv")
if cache_file and os.path.isfile(cache_file):
dataframe = pandas.read_csv(cache_file)
print(f"Loaded dataset from {cache_file}")
else:
for dataset_id in dataset_ids:
new_datasets += 1
subframe = load_contention_dataset(
dataset_glob.format(dataset_id),
dataset_id)
if dataframe is None:
dataframe = subframe
else:
                    dataframe = pandas.concat((dataframe, subframe))
from collections import OrderedDict
import contextlib
from datetime import datetime, time
from functools import partial
import os
from urllib.error import URLError
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas.util.testing as tm
@contextlib.contextmanager
def ignore_xlrd_time_clock_warning():
"""
Context manager to ignore warnings raised by the xlrd library,
regarding the deprecation of `time.clock` in Python 3.7.
"""
with warnings.catch_warnings():
warnings.filterwarnings(
action="ignore",
message="time.clock has been deprecated",
category=DeprecationWarning,
)
yield
read_ext_params = [".xls", ".xlsx", ".xlsm", ".ods"]
engine_params = [
# Add any engines to test here
# When defusedxml is installed it triggers deprecation warnings for
# xlrd and openpyxl, so catch those here
pytest.param(
"xlrd",
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param(
"openpyxl",
marks=[
td.skip_if_no("openpyxl"),
pytest.mark.filterwarnings("ignore:.*html argument"),
],
),
pytest.param(
None,
marks=[
td.skip_if_no("xlrd"),
pytest.mark.filterwarnings("ignore:.*(tree\\.iter|html argument)"),
],
),
pytest.param("odf", marks=td.skip_if_no("odf")),
]
def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool:
"""
Filter out invalid (engine, ext) pairs instead of skipping, as that
produces 500+ pytest.skips.
"""
engine = engine.values[0]
if engine == "openpyxl" and read_ext == ".xls":
return False
if engine == "odf" and read_ext != ".ods":
return False
if read_ext == ".ods" and engine != "odf":
return False
return True
def _transfer_marks(engine, read_ext):
"""
    engine gives us a pytest.param object with some marks; read_ext is just
a string. We need to generate a new pytest.param inheriting the marks.
"""
values = engine.values + (read_ext,)
new_param = pytest.param(values, marks=engine.marks)
return new_param
@pytest.fixture(
autouse=True,
params=[
_transfer_marks(eng, ext)
for eng in engine_params
for ext in read_ext_params
if _is_valid_engine_ext_pair(eng, ext)
],
)
def engine_and_read_ext(request):
"""
Fixture for Excel reader engine and read_ext, only including valid pairs.
"""
return request.param
@pytest.fixture
def engine(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return engine
@pytest.fixture
def read_ext(engine_and_read_ext):
engine, read_ext = engine_and_read_ext
return read_ext
class TestReaders:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for read_excel calls.
"""
func = partial(pd.read_excel, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "read_excel", func)
def test_usecols_int(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["A", "B", "C"])
# usecols as int
msg = "Passing an integer for `usecols`"
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols=3)
# usecols as int
with pytest.raises(ValueError, match=msg):
with ignore_xlrd_time_clock_warning():
pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=3
)
def test_usecols_list(self, read_ext, df_ref):
df_ref = df_ref.reindex(columns=["B", "C"])
df1 = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=[0, 2, 3]
)
df2 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols=[0, 2, 3]
)
# TODO add index to xls file)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_usecols_str(self, read_ext, df_ref):
df1 = df_ref.reindex(columns=["A", "B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A:D"
)
# TODO add index to xls, read xls ignores index name ?
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C,D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C,D"
)
# TODO add index to xls file
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
df1 = df_ref.reindex(columns=["B", "C"])
df2 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, usecols="A,C:D")
df3 = pd.read_excel(
"test1" + read_ext, "Sheet2", skiprows=[1], index_col=0, usecols="A,C:D"
)
tm.assert_frame_equal(df2, df1, check_names=False)
tm.assert_frame_equal(df3, df1, check_names=False)
@pytest.mark.parametrize(
"usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]]
)
def test_usecols_diff_positional_int_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["A", "C"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols=usecols
)
tm.assert_frame_equal(result, expected, check_names=False)
@pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]])
def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref):
expected = df_ref[["B", "D"]]
expected.index = range(len(expected))
result = pd.read_excel("test1" + read_ext, "Sheet1", usecols=usecols)
tm.assert_frame_equal(result, expected, check_names=False)
def test_read_excel_without_slicing(self, read_ext, df_ref):
expected = df_ref
result = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str(self, read_ext, df_ref):
expected = df_ref[["C", "D"]]
result = pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=0, usecols="A,D:E"
)
tm.assert_frame_equal(result, expected, check_names=False)
def test_usecols_excel_range_str_invalid(self, read_ext):
msg = "Invalid column name: E1"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, "Sheet1", usecols="D:E1")
def test_index_col_label_error(self, read_ext):
msg = "list indices must be integers.*, not str"
with pytest.raises(TypeError, match=msg):
pd.read_excel(
"test1" + read_ext, "Sheet1", index_col=["A"], usecols=["A", "C"]
)
def test_index_col_empty(self, read_ext):
# see gh-9208
result = pd.read_excel("test1" + read_ext, "Sheet3", index_col=["A", "B", "C"])
expected = DataFrame(
columns=["D", "E", "F"],
index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", [None, 2])
def test_index_col_with_unnamed(self, read_ext, index_col):
# see gh-18792
result = pd.read_excel("test1" + read_ext, "Sheet4", index_col=index_col)
expected = DataFrame(
[["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"]
)
if index_col:
expected = expected.set_index(expected.columns[index_col])
tm.assert_frame_equal(result, expected)
def test_usecols_pass_non_existent_column(self, read_ext):
msg = (
"Usecols do not match columns, "
"columns expected but not found: " + r"\['E'\]"
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E"])
def test_usecols_wrong_type(self, read_ext):
msg = (
"'usecols' must either be list-like of "
"all strings, all unicode, all integers or a callable."
)
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, usecols=["E1", 0])
def test_excel_stop_iterator(self, read_ext):
parsed = pd.read_excel("test2" + read_ext, "Sheet1")
expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"])
tm.assert_frame_equal(parsed, expected)
def test_excel_cell_error_na(self, read_ext):
parsed = pd.read_excel("test3" + read_ext, "Sheet1")
expected = DataFrame([[np.nan]], columns=["Test"])
tm.assert_frame_equal(parsed, expected)
def test_excel_table(self, read_ext, df_ref):
df1 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0)
df2 = pd.read_excel("test1" + read_ext, "Sheet2", skiprows=[1], index_col=0)
# TODO add index to file
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
df3 = pd.read_excel("test1" + read_ext, "Sheet1", index_col=0, skipfooter=1)
tm.assert_frame_equal(df3, df1.iloc[:-1])
def test_reader_special_dtypes(self, read_ext):
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, 4, 0]),
("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
("BoolCol", [True, False, True, True, False]),
("StrCol", [1, 2, 3, 4, 5]),
# GH5394 - this is why convert_float isn't vectorized
("Str2Col", ["a", 3, "c", "d", "e"]),
(
"DateCol",
[
datetime(2013, 10, 30),
datetime(2013, 10, 31),
datetime(1905, 1, 1),
datetime(2013, 12, 14),
datetime(2015, 3, 14),
],
),
]
)
)
basename = "test_types"
# should read in correctly and infer types
actual = pd.read_excel(basename + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
# if not coercing number, then int comes in as float
float_expected = expected.copy()
float_expected["IntCol"] = float_expected["IntCol"].astype(float)
float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
actual = pd.read_excel(basename + read_ext, "Sheet1", convert_float=False)
tm.assert_frame_equal(actual, float_expected)
# check setting Index (assuming xls and xlsx are the same here)
for icol, name in enumerate(expected.columns):
actual = pd.read_excel(basename + read_ext, "Sheet1", index_col=icol)
exp = expected.set_index(name)
tm.assert_frame_equal(actual, exp)
# convert_float and converters should be different but both accepted
expected["StrCol"] = expected["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext, "Sheet1", converters={"StrCol": str}
)
tm.assert_frame_equal(actual, expected)
no_convert_float = float_expected.copy()
no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
actual = pd.read_excel(
basename + read_ext,
"Sheet1",
convert_float=False,
converters={"StrCol": str},
)
tm.assert_frame_equal(actual, no_convert_float)
# GH8212 - support for converters and missing values
def test_reader_converters(self, read_ext):
basename = "test_converters"
expected = DataFrame.from_dict(
OrderedDict(
[
("IntCol", [1, 2, -3, -1000, 0]),
("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
("BoolCol", ["Found", "Found", "Found", "Not found", "Found"]),
("StrCol", ["1", np.nan, "3", "4", "5"]),
]
)
)
converters = {
"IntCol": lambda x: int(x) if x != "" else -1000,
"FloatCol": lambda x: 10 * x if x else np.nan,
2: lambda x: "Found" if x != "" else "Not found",
3: lambda x: str(x) if x else "",
}
# should read in correctly and set types of single cells (not array
# dtypes)
actual = pd.read_excel(basename + read_ext, "Sheet1", converters=converters)
tm.assert_frame_equal(actual, expected)
def test_reader_dtype(self, read_ext):
# GH 8212
basename = "testdtype"
actual = pd.read_excel(basename + read_ext)
expected = DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
).reindex(columns=["a", "b", "c", "d"])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str}
)
expected["a"] = expected["a"].astype("float64")
expected["b"] = expected["b"].astype("float32")
expected["c"] = ["001", "002", "003", "004"]
tm.assert_frame_equal(actual, expected)
with pytest.raises(ValueError):
pd.read_excel(basename + read_ext, dtype={"d": "int64"})
@pytest.mark.parametrize(
"dtype,expected",
[
(
None,
DataFrame(
{
"a": [1, 2, 3, 4],
"b": [2.5, 3.5, 4.5, 5.5],
"c": [1, 2, 3, 4],
"d": [1.0, 2.0, np.nan, 4.0],
}
),
),
(
{"a": "float64", "b": "float32", "c": str, "d": str},
DataFrame(
{
"a": Series([1, 2, 3, 4], dtype="float64"),
"b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"),
"c": ["001", "002", "003", "004"],
"d": ["1", "2", np.nan, "4"],
}
),
),
],
)
def test_reader_dtype_str(self, read_ext, dtype, expected):
# see gh-20377
basename = "testdtype"
actual = pd.read_excel(basename + read_ext, dtype=dtype)
tm.assert_frame_equal(actual, expected)
def test_reading_all_sheets(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# Ensure a dict is returned.
# See PR #9450
basename = "test_multisheet"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
# ensure this is not alphabetical to test order preservation
expected_keys = ["Charlie", "Alpha", "Beta"]
tm.assert_contains_all(expected_keys, dfs.keys())
# Issue 9930
# Ensure sheet order is preserved
assert expected_keys == list(dfs.keys())
def test_reading_multiple_specific_sheets(self, read_ext):
# Test reading specific sheetnames by specifying a mixed list
# of integers and strings, and confirm that duplicated sheet
# references (positions/names) are removed properly.
# Ensure a dict is returned
# See PR #9450
basename = "test_multisheet"
# Explicitly request duplicates. Only the set should be returned.
expected_keys = [2, "Charlie", "Charlie"]
dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys)
expected_keys = list(set(expected_keys))
tm.assert_contains_all(expected_keys, dfs.keys())
assert len(expected_keys) == len(dfs.keys())
def test_reading_all_sheets_with_blank(self, read_ext):
# Test reading all sheetnames by setting sheetname to None,
# In the case where some sheets are blank.
# Issue #11711
basename = "blank_with_header"
dfs = pd.read_excel(basename + read_ext, sheet_name=None)
expected_keys = ["Sheet1", "Sheet2", "Sheet3"]
tm.assert_contains_all(expected_keys, dfs.keys())
# GH6403
def test_read_excel_blank(self, read_ext):
actual = pd.read_excel("blank" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, DataFrame())
def test_read_excel_blank_with_header(self, read_ext):
expected = DataFrame(columns=["col_1", "col_2"])
actual = pd.read_excel("blank_with_header" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_date_conversion_overflow(self, read_ext):
# GH 10001 : pandas.ExcelFile ignore parse_dates=False
expected = pd.DataFrame(
[
[pd.Timestamp("2016-03-12"), "<NAME>"],
[pd.Timestamp("2016-03-16"), "<NAME>"],
[1e20, "<NAME>"],
],
columns=["DateColWithBigInt", "StringCol"],
)
if pd.read_excel.keywords["engine"] == "openpyxl":
pytest.xfail("Maybe not supported by openpyxl")
result = pd.read_excel("testdateoverflow" + read_ext)
tm.assert_frame_equal(result, expected)
def test_sheet_name(self, read_ext, df_ref):
filename = "test1"
sheet_name = "Sheet1"
df1 = pd.read_excel(
filename + read_ext, sheet_name=sheet_name, index_col=0
) # doc
with ignore_xlrd_time_clock_warning():
df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name)
tm.assert_frame_equal(df1, df_ref, check_names=False)
tm.assert_frame_equal(df2, df_ref, check_names=False)
def test_excel_read_buffer(self, read_ext):
pth = "test1" + read_ext
expected = pd.read_excel(pth, "Sheet1", index_col=0)
with open(pth, "rb") as f:
actual = pd.read_excel(f, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_bad_engine_raises(self, read_ext):
bad_engine = "foo"
with pytest.raises(ValueError, match="Unknown engine: foo"):
pd.read_excel("", engine=bad_engine)
@tm.network
def test_read_from_http_url(self, read_ext):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/excel/test1" + read_ext
)
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_not_us_locale
def test_read_from_s3_url(self, read_ext, s3_resource):
# Bucket "pandas-test" created in tests/io/conftest.py
with open("test1" + read_ext, "rb") as f:
s3_resource.Bucket("pandas-test").put_object(Key="test1" + read_ext, Body=f)
url = "s3://pandas-test/test1" + read_ext
url_table = pd.read_excel(url)
local_table = pd.read_excel("test1" + read_ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
# ignore warning from old xlrd
@pytest.mark.filterwarnings("ignore:This metho:PendingDeprecationWarning")
def test_read_from_file_url(self, read_ext, datapath):
# FILE
localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext)
local_table = pd.read_excel(localtable)
try:
url_table = pd.read_excel("file://localhost/" + localtable)
except URLError:
# fails on some systems
import platform
pytest.skip("failing on {}".format(" ".join(platform.uname()).strip()))
tm.assert_frame_equal(url_table, local_table)
def test_read_from_pathlib_path(self, read_ext):
# GH12655
from pathlib import Path
str_path = "test1" + read_ext
expected = pd.read_excel(str_path, "Sheet1", index_col=0)
path_obj = Path("test1" + read_ext)
actual = pd.read_excel(path_obj, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no("py.path")
@td.check_file_leaks
def test_read_from_py_localpath(self, read_ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join("test1" + read_ext)
expected = pd.read_excel(str_path, "Sheet1", index_col=0)
path_obj = LocalPath().join("test1" + read_ext)
actual = pd.read_excel(path_obj, "Sheet1", index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_seconds(self, read_ext):
# Test reading times with and without milliseconds. GH5945.
expected = DataFrame.from_dict(
{
"Time": [
time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54),
]
}
)
actual = pd.read_excel("times_1900" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel("times_1904" + read_ext, "Sheet1")
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, read_ext):
# see gh-4679
mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]])
mi_file = "testmultiindex" + read_ext
# "mi_column" sheet
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=mi,
)
actual = pd.read_excel(mi_file, "mi_column", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# "mi_index" sheet
expected.index = mi
expected.columns = ["a", "b", "c", "d"]
actual = pd.read_excel(mi_file, "mi_index", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "both" sheet
expected.columns = mi
actual = pd.read_excel(mi_file, "both", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
# "mi_index_name" sheet
expected.columns = ["a", "b", "c", "d"]
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, "mi_index_name", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# "mi_column_name" sheet
expected.index = list(range(4))
expected.columns = mi.set_names(["c1", "c2"])
actual = pd.read_excel(mi_file, "mi_column_name", header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# see gh-11317
# "name_with_int" sheet
expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"])
actual = pd.read_excel(mi_file, "name_with_int", index_col=0, header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_name" sheet
expected.columns = mi.set_names(["c1", "c2"])
expected.index = mi.set_names(["ilvl1", "ilvl2"])
actual = pd.read_excel(mi_file, "both_name", index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected)
# "both_skiprows" sheet
actual = pd.read_excel(
mi_file, "both_name_skiprows", index_col=[0, 1], header=[0, 1], skiprows=2
)
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex_header_only(self, read_ext):
# see gh-11733.
#
# Don't try to parse a header name if there isn't one.
mi_file = "testmultiindex" + read_ext
result = pd.read_excel(mi_file, "index_col_none", header=[0, 1])
exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")])
expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns)
tm.assert_frame_equal(result, expected)
def test_excel_old_index_format(self, read_ext):
# see gh-4679
filename = "test_index_name_pre17" + read_ext
# We detect headers to determine if index names exist, so
# that "index" name in the "names" version of the data will
# now be interpreted as rows that include null data.
data = np.array(
[
[None, None, None, None, None],
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
]
)
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(
levels=[
["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
],
codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
names=[None, None],
)
si = Index(
["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None
)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(filename, "single_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(filename, "multi_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
# The analogous versions of the "names" version data
# where there are explicitly no names for the indices.
data = np.array(
[
["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"],
["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"],
["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"],
["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"],
["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"],
]
)
columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"]
mi = MultiIndex(
levels=[
["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"],
["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"],
],
codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
names=[None, None],
)
si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None)
expected = pd.DataFrame(data, index=si, columns=columns)
actual = pd.read_excel(filename, "single_no_names", index_col=0)
tm.assert_frame_equal(actual, expected)
expected.index = mi
actual = pd.read_excel(filename, "multi_no_names", index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, read_ext):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
pd.read_excel("test1" + read_ext, header=arg)
def test_read_excel_chunksize(self, read_ext):
# GH 8011
with pytest.raises(NotImplementedError):
pd.read_excel("test1" + read_ext, chunksize=100)
def test_read_excel_skiprows_list(self, read_ext):
# GH 4903
actual = pd.read_excel(
"testskiprows" + read_ext, "skiprows_list", skiprows=[0, 2]
)
expected = DataFrame(
[
[1, 2.5, pd.Timestamp("2015-01-01"), True],
[2, 3.5, pd.Timestamp("2015-01-02"), False],
[3, 4.5, pd.Timestamp("2015-01-03"), False],
[4, 5.5, pd.Timestamp("2015-01-04"), True],
],
columns=["a", "b", "c", "d"],
)
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(
"testskiprows" + read_ext, "skiprows_list", skiprows=np.array([0, 2])
)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, read_ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
expected = pd.read_excel("test1" + read_ext)
expected = expected[:num_rows_to_pull]
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_greater_than_nrows_in_file(self, read_ext):
# GH 16645
expected = pd.read_excel("test1" + read_ext)
num_records_in_file = len(expected)
num_rows_to_pull = num_records_in_file + 10
actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_non_integer_parameter(self, read_ext):
# GH 16645
msg = "'nrows' must be an integer >=0"
with pytest.raises(ValueError, match=msg):
pd.read_excel("test1" + read_ext, nrows="5")
def test_read_excel_squeeze(self, read_ext):
# GH 12157
f = "test_squeeze" + read_ext
actual = pd.read_excel(f, "two_columns", index_col=0, squeeze=True)
expected = pd.Series([2, 3, 4], [4, 5, 6], name="b")
expected.index.name = "a"
tm.assert_series_equal(actual, expected)
actual = pd.read_excel(f, "two_columns", squeeze=True)
expected = pd.DataFrame({"a": [4, 5, 6], "b": [2, 3, 4]})
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(f, "one_column", squeeze=True)
expected = pd.Series([1, 2, 3], name="a")
tm.assert_series_equal(actual, expected)
class TestExcelFileRead:
@pytest.fixture(autouse=True)
def cd_and_set_engine(self, engine, datapath, monkeypatch):
"""
Change directory and set engine for ExcelFile objects.
"""
func = partial(pd.ExcelFile, engine=engine)
monkeypatch.chdir(datapath("io", "data", "excel"))
monkeypatch.setattr(pd, "ExcelFile", func)
def test_excel_passes_na(self, read_ext):
with pd.ExcelFile("test4" + read_ext) as excel:
parsed = pd.read_excel(
excel, "Sheet1", keep_default_na=False, na_values=["apple"]
)
expected = DataFrame(
[["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"]
)
tm.assert_frame_equal(parsed, expected)
with | pd.ExcelFile("test4" + read_ext) | pandas.ExcelFile |
import dataclasses
import itertools
from typing import Dict, List
import datetime
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Union
import pytest
from datapublic.common_fields import CommonFields
import pandas as pd
from datapublic.common_fields import DemographicBucket
from libs.datasets import data_source
from libs.datasets import taglib
from libs.datasets.sources import can_scraper_helpers as ccd_helpers
from libs.datasets.taglib import UrlStr
from libs.pipeline import Region
from tests import test_helpers
from tests.test_helpers import TimeseriesLiteral
# Match fields in the CAN Scraper DB
DEFAULT_LOCATION = 36 # FIPS is an int in the parquet file, not a str
DEFAULT_LOCATION_TYPE = "state"
DEFAULT_LOCATION_ID = Region.from_fips(str(DEFAULT_LOCATION)).location_id
DEFAULT_START_DATE = test_helpers.DEFAULT_START_DATE
def _make_iterator(maybe_iterable: Union[None, str, Iterable[str]]) -> Optional[Iterator[str]]:
if maybe_iterable is None:
return None
elif isinstance(maybe_iterable, str):
return itertools.repeat(maybe_iterable)
else:
return iter(maybe_iterable)
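# Example (illustrative) of _make_iterator above: _make_iterator(None) -> None;
# _make_iterator("url") -> an iterator that repeats "url" indefinitely;
# _make_iterator(["u1", "u2"]) -> an iterator yielding "u1" then "u2".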
def build_can_scraper_dataframe(
data_by_variable: Dict[ccd_helpers.ScraperVariable, List[float]],
location=DEFAULT_LOCATION,
location_type=DEFAULT_LOCATION_TYPE,
location_id=DEFAULT_LOCATION_ID,
start_date=DEFAULT_START_DATE,
source_url: Union[None, str, Iterable[str]] = None,
source_name: Union[None, str, Iterable[str]] = None,
) -> pd.DataFrame:
"""Creates a DataFrame with the same structure as the CAN Scraper parquet file.
Args:
source_url: None to not include the column or a string for every observation or
an iterable of strings to add to each observation in the order created.
"""
source_url_iter = _make_iterator(source_url)
source_name_iter = _make_iterator(source_name)
start_date = datetime.datetime.fromisoformat(start_date)
rows = []
for variable, data in data_by_variable.items():
for i, value in enumerate(data):
date = start_date + datetime.timedelta(days=i)
row = {
"provider": variable.provider,
"dt": date,
"location_type": location_type,
"location_id": location_id,
"location": location,
"variable_name": variable.variable_name,
"measurement": variable.measurement,
"unit": variable.unit,
"age": variable.age,
"race": variable.race,
"ethnicity": variable.ethnicity,
"sex": variable.sex,
"value": value,
}
if source_url:
row["source_url"] = next(source_url_iter)
if source_name:
row["source_name"] = next(source_name_iter)
rows.append(row)
return | pd.DataFrame(rows) | pandas.DataFrame |
from pylab import *
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
import requests
import pandas_datareader.data as web
from Create_PDF_Report import portfolio_report
ALPHA_VANTAGE_KEY = 'ENTER_KEY'
RESULT_DETAILED = True
USER_AGENT = {
'User-Agent': (
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36')
}
sesh = requests.Session()
sesh.headers.update(USER_AGENT)
def check_if_weekend(date):
def test(date):
try:
temp_date = date.strftime('%Y-%m-%d')
temp = data_set.loc[temp_date]
error = False
except:
date = date - datetime.timedelta(days=1)
error = True
return date, error
if date.weekday() == 6:
date = date - datetime.timedelta(days=2)
elif date.weekday() == 5:
date = date - datetime.timedelta(days=1)
try:
temp = data_set.loc[date.strftime('%Y-%m-%d')]
error = False
except:
error = True
while error == True:
date, error = test(date)
return date
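# Illustrative note on check_if_weekend above: weekend dates are first mapped to the preceding
# Friday, then the date is stepped back one day at a time until it appears in the (module-global)
# data_set index, i.e. the most recent prior date that has price data.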
def calculate_return(start_date, end_date):
global data_set
data_set = portfolio_main.historical_performance_stock()
portfolio_return = 0
try:
if data_set.all() != 0:
end_date_value = data_set.loc[end_date]
start_date_value = data_set.loc[start_date]
portfolio_return += ((float(end_date_value) / float(start_date_value)) - 1) * total_hist_p_allocation
except AttributeError:
i = 0
i = 0
while i < len(ava_fund_list):
url = "https://www.avanza.se/_api/fund-guide/chart/" + ava_fund_list_id[i] + "/" + start_date + "/" + end_date
response = requests.get(url)
dictr = response.json()
recs = dictr['dataSerie']
ava_fund_temp_data = pd.json_normalize(recs)
performance_ava_fund = float(ava_fund_temp_data.iloc[-1, 1]) / 100 * ava_fund_list_allocation[i]
portfolio_return += performance_ava_fund
i += 1
return portfolio_return
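# Illustrative summary of calculate_return above: the stock part of the portfolio is valued from
# the cached historical_performance_stock series (weighted by total_hist_p_allocation), and each
# Avanza fund's percentage development is fetched from the fund-guide chart endpoint and weighted
# by its allocation; both contributions are summed into a single period return.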
class Portfolio:
def historical_performance_all(self):
ava_fund = pd.read_csv('Avanza Fond ID.csv', index_col=0)
date_list = []
for item in self.position_performance:
date_list.append(item.index[0])
i_date = 0
while date_list[i_date] != max(date_list):
i_date += 1
i = i_date
if self.positions[i] not in ava_fund.index:
temp_data = self.position_performance[i].groupby(self.position_performance[i].index.to_period('M')).first()
temp_data = temp_data.dropna()
temp_data.rename({'Adj Close': 'y'}, axis=1, inplace=True)
temp_data.index.name = 'x'
temp_data = (temp_data.div(temp_data['y'][0]) - 1) * 100
else:
temp_data = self.position_performance[i]
temp_data = temp_data.groupby(temp_data.index.to_period('M')).first()
portfolio_historical_performance = temp_data['y'] * self.position_allocation[i]
i = 0
i += 1
while i < len(self.positions):
if i != i_date:
if self.positions[i] not in ava_fund.index:
temp_data = self.position_performance[i].groupby(
self.position_performance[i].index.to_period('M')).first()
temp_data = temp_data.dropna()
temp_data.rename({'Adj Close': 'y'}, axis=1, inplace=True)
temp_data.index.name = 'x'
temp_data = (temp_data.div(temp_data['y'][0]) - 1) * 100
else:
temp_data = self.position_performance[i]
temp_data = temp_data.groupby(temp_data.index.to_period('M')).first()
if portfolio_historical_performance.index[0] in temp_data.index:
data_point_first = int(temp_data.index.get_loc(portfolio_historical_performance.index[0]))
temp_data = temp_data.iloc[data_point_first:].div(temp_data.iloc[data_point_first, 0])
portfolio_historical_performance += temp_data['y'] * self.position_allocation[i]
else:
portfolio_historical_performance += temp_data['y'] * self.position_allocation[i]
i += 1
# Plotting Stuff
slower = np.ma.masked_where(portfolio_historical_performance > 0, portfolio_historical_performance)
negative_return = portfolio_historical_performance.copy()
negative_return[slower > 0] = np.nan
fig, ax = plt.subplots(figsize=(10, 5))
portfolio_historical_performance.plot(ax=ax, color="#348dc1") # Benchmark Colour ? "#fedd78"
negative_return.plot(ax=ax, color="darkred")
ax.set_ylabel('', fontweight='bold', fontsize=12, color="black")
ax.set_xlabel('')
ax.yaxis.set_label_coords(-.1, .5)
ax.grid(True)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
fig.suptitle("Performance", y=.99, fontweight="bold",
fontsize=14, color="black")
ax.axhline(0, ls="-", lw=1,
color='gray', zorder=1)
ax.axhline(0, ls="--", lw=1,
color='black', zorder=2)
fig.set_facecolor('white')
ax.set_title("%s - %s" % (
portfolio_historical_performance.index[:1][0].strftime('%e %b \'%y'),
portfolio_historical_performance.index[-1:][0].strftime('%e %b \'%y')
), fontsize=12, color='gray')
ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, loc: "{:,}%".format(int(x))))
ax.set_facecolor('white')
ax.fill_between(portfolio_historical_performance.index, 0, portfolio_historical_performance,
where=portfolio_historical_performance >= 0, interpolate=True,
color="#348dc1", alpha=.25)
ax.fill_between(portfolio_historical_performance.index, 0, portfolio_historical_performance,
where=portfolio_historical_performance <= 0, interpolate=True,
color="red", alpha=.25)
fig.autofmt_xdate()
try:
fig.tight_layout()
# plt.subplots_adjust(hspace=0, bottom=0, top=1)
except Exception:
pass
fig.savefig("Portfolio_Return.png")
def historical_performance_stock(self):
global total_hist_p_allocation
total_hist_p_allocation = 0
ava_fund = pd.read_csv('Avanza Fond ID.csv', index_col=0)
if self.positions[0] not in ava_fund.index:
performance = self.position_performance[0]['Adj Close'].div(
self.position_performance[0]['Adj Close'][0]).dropna().mul(self.position_allocation[0])
total_hist_p_allocation += self.position_allocation[0]
else:
performance = 0
loc_perf_index = 1
while len(self.positions) > loc_perf_index:
if self.positions[loc_perf_index] not in ava_fund.index:
if len(self.positions) > loc_perf_index:
performance += self.position_performance[loc_perf_index]['Adj Close'].div(
self.position_performance[loc_perf_index]['Adj Close'][0]).dropna().mul(
self.position_allocation[loc_perf_index])
total_hist_p_allocation += self.position_allocation[loc_perf_index]
loc_perf_index += 1
'''
fig, ax = plt.subplots()
ax.plot(performance, '-')
ax.grid(True)
ax.set_xlabel('Date')
ax.set_ylabel('Price')
fig.suptitle('Performance')
fig.autofmt_xdate()
plt.show()
'''
if total_hist_p_allocation == 0:
performance = 0
return performance
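    # Illustrative note: historical_performance_stock aggregates only the non-Avanza (stock)
    # positions, weighting each normalized 'Adj Close' series by its allocation.
    # total_hist_p_allocation records the combined weight of those positions, and 0 is
    # returned when the portfolio holds no stock positions at all.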
def stress_test(self):
        draw_downs = 'Drawdown Periods:\n\nGlobal Financial Crisis (10.09.2007-03.09.2009)\nPerformance: ' + str(round(
calculate_return("2007-10-09", "2009-03-09") * 100,
2)) + '% SP500: -54.89%\nU.S. Downgrade (04.30.2011-10.03.2011)\nPerformance: ' + str(round(
calculate_return("2011-04-29", "2011-10-03") * 100,
2)) + '% SP500: -18.64%\nGlobal Slowdown Fears (05.22.2015-08.25.2015)\nPerformance: ' + str(round(
calculate_return("2015-05-22", "2015-08-25") * 100,
2)) + '% SP500: -11.89%\nOil, U.S. Recession Fears (11.04.2015-02.11.2016)\nPerformance: ' + str(round(
calculate_return("2015-11-04", "2016-02-11") * 100,
2)) + '% SP500: -12.71%\nRising Rates/Trade (09.21.2018-12.24.2018) \nPerformance: ' + str(round(
calculate_return("2018-09-21", "2018-12-24") * 100,
2)) + '% SP500: -19.36%\nCovid-19 Concerns Begin (02.19.2020-03.23.2020) \nPerformance: ' + str(round(
calculate_return("2020-02-19", "2020-03-23") * 100, 2)) + '% SP500: -33.47%'
rebounds = 'Rebound Periods:\n\nRecession Ends (03.10.2009-04.23.2010)\nPerformance: ' + str(round(
calculate_return("2009-03-10", "2010-04-23") * 100,
2)) + '% SP500: 84.21%\nFlash Crash Rebound/European Relief (07.02.2010-02.18.2011)\nPerformance: ' + str(
round(
calculate_return("2010-07-02", "2011-02-18") * 100,
2)) + '% SP500: 33.02%\nCentral Bank QE (12.30.2011-12.29.2014)\nPerformance: ' + str(round(
calculate_return("2011-12-30", "2014-12-29") * 100,
2)) + '% SP500: 55.40%\nChina Easing/Oil rebound/Weaker USD (02.12.2016-01.26.2018)\nPerformance: ' + str(
round(
calculate_return("2016-02-12", "2018-01-26") * 100,
2)) + '% SP500: 63.49%\nFed Eases (12.26.2018-12.27.2019)\nPerformance: ' + str(round(
calculate_return("2018-12-26", "2019-12-27") * 100,
2)) + '% SP500: 40.63%\nFiscal/Fed Covid-19 Response (03.24.2020-06.08.2020)\nPerformance: ' + str(round(
calculate_return("2020-03-24", "2020-06-08") * 100, 2)) + '% SP500: 40.63%'
falling_ir = 'Falling Interest Rates:\n\nU.S. Downgrade (02.09.2011-09.22.2011)\nPerformance: ' + str(round(
calculate_return("2011-02-09", "2011-09-22") * 100,
2)) + '% (-2.03)\nEurope Debt Crisis/Flight to Quality (03.20.2012-07.25.2012)\nPerformance: ' + str(round(
calculate_return("2012-03-20", "2012-07-25") * 100,
2)) + '% (-0.93)\nWeaker Growth/Low Inflation (01.09.2014-02.02.2015)\nPerformance: ' + str(round(
calculate_return("2014-01-09", "2015-02-02") * 100,
2)) + '% (-1.33)\nGlobal Slowdown Fear (06.11.2015-07.05.2016)\nPerformance: ' + str(round(
calculate_return("2015-06-11", "2016-07-05") * 100,
2)) + '% (-1.13)\nEscalated U.S.-China Trade War (11.09.2018-09.04.2019)\nPerformance: ' + str(round(
calculate_return("2018-11-09", "2019-09-04") * 100,
2)) + '% (-1.77)\nCovid-19 Concerns Begin (01.21.2020-03.09.2020)\nPerformance: ' + str(round(
calculate_return("2020-01-21", "2020-03-09") * 100, 2)) + '% (-1.30)'
rising_ir = 'Rising Interest Rates (Change in RFR)\n\n10.06.2010-02.08.2011\nPerformance: ' + str(round(
calculate_return("2010-10-06", "2011-08-02") * 100,
2)) + '% (+1.34)\n05.02.2013-09.05.2013\nPerformance: ' + str(round(
calculate_return("2013-05-02", "2013-09-05") * 100,
2)) + '% (+1.32)\n07.08.2015-12.15.2015\nPerformance: ' + str(round(
calculate_return("2015-07-08", "2015-12-15") * 100,
2)) + '% (+1.23)\n09.07.2017-05.17.2018\nPerformance: ' + str(round(
calculate_return("2017-09-07", "2018-05-17") * 100,
2)) + '% (+1.06)\nCovid-19 Recovery/Inflation Concerns (03.09.2020-03.19.2021)\nPerformance: ' + str(round(
calculate_return("2020-03-09", "2021-03-19") * 100, 2)) + '% (+1.20)'
return draw_downs, rebounds, falling_ir, rising_ir
def pdf_data_generate(self):
ava_fund = pd.read_csv('Avanza Fond ID.csv', index_col=0)
today = datetime.datetime.now()
today_date = check_if_weekend(today)
today_date = today_date.strftime('%Y-%m-%d')
date_one_y_ago = today - datetime.timedelta(days=365)
date_one_y_ago = check_if_weekend(date_one_y_ago)
date_one_y_ago = date_one_y_ago.strftime('%Y-%m-%d')
date_one_m_ago = today - datetime.timedelta(days=30)
date_one_m_ago = check_if_weekend(date_one_m_ago)
date_one_m_ago = date_one_m_ago.strftime('%Y-%m-%d')
date_three_m_ago = today - datetime.timedelta(days=90)
date_three_m_ago = check_if_weekend(date_three_m_ago)
date_three_m_ago = date_three_m_ago.strftime('%Y-%m-%d')
date_three_y_ago = today - datetime.timedelta(days=1095)
date_three_y_ago = check_if_weekend(date_three_y_ago)
date_three_y_ago = date_three_y_ago.strftime('%Y-%m-%d')
date_begin_of_year = today.date().replace(month=1, day=1)
date_begin_of_year = check_if_weekend(date_begin_of_year)
date_begin_of_year = date_begin_of_year.strftime('%Y-%m-%d')
performance_1m = str(round(calculate_return(date_one_m_ago, today_date) * 100, 2))
performance_3m = str(round(calculate_return(date_three_m_ago, today_date) * 100, 2))
performance_ytd = str(round(calculate_return(date_begin_of_year, today_date) * 100, 2))
performance_1y = str(round(calculate_return(date_one_y_ago, today_date) * 100, 2))
performance_3y = str(round(calculate_return(date_three_y_ago, today_date) * 100, 2))
i_ava = 0
i = 0
holding_overview_list = []
for position in positions_list:
position_current_list = []
position_current_list.append(position)
data_frame_temp = self.position_performance[i]
if position not in ava_fund.index:
performance_current_1y = str(round((float(data_frame_temp['Adj Close'][-1]) / float(
data_frame_temp['Adj Close'].loc[date_one_y_ago]) - 1) * 100, 2)) + "%"
performance_current_3y = str(round((float(data_frame_temp['Adj Close'][-1]) / float(
data_frame_temp['Adj Close'].loc[date_three_y_ago]) - 1) * 100, 2)) + "%"
else:
json_dict = ava_fund_list_info[i_ava]
try:
performance_current_1y = str(round(json_dict['developmentOneYear'], 2)) + "%"
performance_current_3y = str(round(json_dict['developmentThreeYears'], 2)) + "%"
except:
performance_current_1y = 'Error'
performance_current_3y = 'Error'
i_ava += 1
position_current_list.append(performance_current_1y)
position_current_list.append(performance_current_3y)
position_current_list.append(str(self.position_allocation[i] * 100) + "%")
holding_overview_list.append(position_current_list)
i += 1
return performance_1m, performance_3m, performance_ytd, performance_1y, performance_3y, holding_overview_list
    def __init__(self, list_of_positions, allocation_of_positions):  # initializer
self.positions = list_of_positions
self.position_allocation = allocation_of_positions
global ava_fund_list, ava_fund_list_info, ava_fund_list_allocation, ava_fund_list_id, in_ava_fund, stock_details, stock_overview, stock_ratings, stock_forecast
position_data_frame_list = []
ava_fund_list = []
ava_fund_list_info = []
ava_fund_list_allocation = []
ava_fund_list_id = []
in_ava_fund = []
stock_details = []
stock_temp = []
stock_overview = []
stock_ratings = []
stock_forecast = []
ava_fund = pd.read_csv('Avanza Fond ID.csv', index_col=0)
while len(self.positions) > len(position_data_frame_list):
if self.positions[len(position_data_frame_list)] in ava_fund.index:
fund_id = ava_fund.loc[self.positions[len(position_data_frame_list)], 'ID']
url = 'https://www.avanza.se/_api/fund-guide/guide/' + fund_id
response = requests.get(url)
ava_fund_list.append(self.positions[len(position_data_frame_list)])
ava_fund_list_id.append(fund_id)
ava_fund_list_info.append(response.json())
ava_fund_list_allocation.append(self.position_allocation[len(position_data_frame_list)])
url = 'https://www.avanza.se/_api/fund-guide/chart/' + fund_id + '/infinity'
response = requests.get(url)
dictr = response.json()
recs = dictr['dataSerie']
temp_data = pd.json_normalize(recs)
i = 0
for item in temp_data['x']:
test = datetime.datetime.fromtimestamp(int(float(item) / 1000))
temp_data.iloc[i, 0] = f"{test:%Y-%m-%d}"
i += 1
temp_data['x'] = | pd.to_datetime(temp_data['x']) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 24 21:12:53 2020
@author: daniel
"""
## [1]
# @title Imports (run this cell)
from __future__ import print_function
import numpy as np
import pandas as pd
import collections
#from mpl_toolkits.mplot3d import Axes3D
from IPython import display
from matplotlib import pyplot as plt
import sklearn
import sklearn.manifold
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
tf.logging.set_verbosity(tf.logging.ERROR)
import random
random.seed(12345)
from sklearn.preprocessing import StandardScaler
import umap
# Add some convenience functions to Pandas DataFrame.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
def mask(df, key, function):
"""Returns a filtered dataframe, by applying function to key"""
return df[function(df[key])]
def flatten_cols(df):
df.columns = [' '.join(col).strip() for col in df.columns.values]
return df
pd.DataFrame.mask = mask
pd.DataFrame.flatten_cols = flatten_cols
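# Example (illustrative) of the helpers attached above:
#   ratings.mask('rating', lambda s: s >= 4.0)   # keep only rows rated 4 stars or more
#   ratings.groupby('user_id').agg({'rating': ['count', 'mean']}).flatten_cols()
#   # -> the MultiIndex columns become 'rating count' and 'rating mean'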
# Altair.
import altair as alt
alt.data_transformers.enable('default', max_rows=None)
#alt.renderers.enable('notebook') # For Jupyter Notebook
alt.renderers.enable('altair_viewer') # For Spyder
USER_RATINGS = False
## [2]
# @title Load the MovieLens data (run this cell).
import os
os.chdir('/Users/daniel/Data-Science/Data/MovieLens/Education/ml-100k')
# Load each data set (users, movies, and ratings).
users_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv(
'ml-100k/u.user', sep='|', names=users_cols, encoding='latin-1')
ratings_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv(
'ml-100k/u.data', sep='\t', names=ratings_cols, encoding='latin-1')
# The movies file contains a binary feature for each genre.
genre_cols = [
"genre_unknown", "Action", "Adventure", "Animation", "Children", "Comedy",
"Crime", "Documentary", "Drama", "Fantasy", "Film-Noir", "Horror",
"Musical", "Mystery", "Romance", "Sci-Fi", "Thriller", "War", "Western"
]
movies_cols = [
'movie_id', 'title', 'release_date', "video_release_date", "imdb_url"
] + genre_cols
movies = pd.read_csv(
'ml-100k/u.item', sep='|', names=movies_cols, encoding='latin-1')
# Since the ids start at 1, we shift them to start at 0.
users["user_id"] = users["user_id"].apply(lambda x: str(x-1))
movies["movie_id"] = movies["movie_id"].apply(lambda x: str(x-1))
movies["year"] = movies['release_date'].apply(lambda x: str(x).split('-')[-1])
ratings["movie_id"] = ratings["movie_id"].apply(lambda x: str(x-1))
ratings["user_id"] = ratings["user_id"].apply(lambda x: str(x-1))
ratings["rating"] = ratings["rating"].apply(lambda x: float(x))
# Compute the number of movies to which a genre is assigned.
genre_occurences = movies[genre_cols].sum().to_dict()
# Since some movies can belong to more than one genre, we create different
# 'genre' columns as follows:
# - all_genres: all the active genres of the movie.
# - genre: randomly sampled from the active genres.
def mark_genres(movies, genres):
def get_random_genre(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return np.random.choice(active)
def get_all_genres(gs):
active = [genre for genre, g in zip(genres, gs) if g==1]
if len(active) == 0:
return 'Other'
return '-'.join(active)
movies['genre'] = [
get_random_genre(gs) for gs in zip(*[movies[genre] for genre in genres])]
movies['all_genres'] = [
get_all_genres(gs) for gs in zip(*[movies[genre] for genre in genres])]
mark_genres(movies, genre_cols)
# Create one merged DataFrame containing all the movielens data.
movielens = ratings.merge(movies, on='movie_id').merge(users, on='user_id')
# Utility to split the data into training and test sets.
def split_dataframe(df, holdout_fraction=0.1):
"""Splits a DataFrame into training and test sets.
Args:
df: a dataframe.
holdout_fraction: fraction of dataframe rows to use in the test set.
Returns:
train: dataframe for training
test: dataframe for testing
"""
test = df.sample(frac=holdout_fraction, replace=False)
train = df[~df.index.isin(test.index)]
return train, test
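# Example (illustrative): train, test = split_dataframe(ratings, holdout_fraction=0.1)
# samples ~10% of the rows (without replacement) into `test` and keeps the rest in `train`.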
## [3]
users.describe()
## [4]
users.describe(include=[np.object])
## [5]
# @title Altair visualization code (run this cell)
# The following functions are used to generate interactive Altair charts.
# We will display histograms of the data, sliced by a given attribute.
# Create filters to be used to slice the data.
occupation_filter = alt.selection_multi(fields=["occupation"])
occupation_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y("occupation:N"),
color=alt.condition(
occupation_filter,
alt.Color("occupation:N", scale=alt.Scale(scheme='category20')),
alt.value("lightgray")),
).properties(width=300, height=300, selection=occupation_filter)
# A function that generates a histogram of filtered data.
def filtered_hist(field, label, filter):
"""Creates a layered chart of histograms.
The first layer (light gray) contains the histogram of the full data, and the
second contains the histogram of the filtered data.
Args:
field: the field for which to generate the histogram.
label: String label of the histogram.
filter: an alt.Selection object to be used to filter the data.
"""
base = alt.Chart().mark_bar().encode(
x=alt.X(field, bin=alt.Bin(maxbins=10), title=label),
y="count()",
).properties(
width=300,
)
return alt.layer(
base.transform_filter(filter),
base.encode(color=alt.value('lightgray'), opacity=alt.value(.7)),
).resolve_scale(y='independent')
## [6]
users_ratings = (
ratings
.groupby('user_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols()
.merge(users, on='user_id')
)
# Create a chart for the count, and one for the mean.
alt.hconcat(
filtered_hist('rating count', '# ratings / user', occupation_filter),
filtered_hist('rating mean', 'mean user rating', occupation_filter),
occupation_chart,
data=users_ratings)
## [7]
movies_ratings = movies.merge(
ratings
.groupby('movie_id', as_index=False)
.agg({'rating': ['count', 'mean']})
.flatten_cols(),
on='movie_id')
genre_filter = alt.selection_multi(fields=['genre'])
genre_chart = alt.Chart().mark_bar().encode(
x="count()",
y=alt.Y('genre'),
color=alt.condition(
genre_filter,
alt.Color("genre:N"),
alt.value('lightgray'))
).properties(height=300, selection=genre_filter)
## [8]
(movies_ratings[['title', 'rating count', 'rating mean']]
.sort_values('rating count', ascending=False)
.head(10))
## [9]
(movies_ratings[['title', 'rating count', 'rating mean']]
.mask('rating count', lambda x: x > 20)
.sort_values('rating mean', ascending=False)
.head(10))
## [10]
# Display the number of ratings and average rating per movie.
alt.hconcat(
filtered_hist('rating count', '# ratings / movie', genre_filter),
filtered_hist('rating mean', 'mean movie rating', genre_filter),
genre_chart,
data=movies_ratings)
## [11]
#@title Solution
def build_rating_sparse_tensor(ratings_df):
"""
Args:
ratings_df: a pd.DataFrame with `user_id`, `movie_id` and `rating` columns.
Returns:
a tf.SparseTensor representing the ratings matrix.
"""
indices = ratings_df[['user_id', 'movie_id']].values
values = ratings_df['rating'].values
return tf.SparseTensor(
indices=indices,
values=values,
dense_shape=[users.shape[0], movies.shape[0]])
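# Illustrative note: the returned SparseTensor has dense_shape [num_users, num_movies] and stores
# one (user, movie, rating) entry per observed rating; unobserved pairs are simply absent.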
## [12]
#@title Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.gather_nd(
tf.matmul(user_embeddings, movie_embeddings, transpose_b=True),
sparse_ratings.indices)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
## [13]
#@title Alternate Solution
def sparse_mean_square_error(sparse_ratings, user_embeddings, movie_embeddings):
"""
Args:
sparse_ratings: A SparseTensor rating matrix, of dense_shape [N, M]
user_embeddings: A dense Tensor U of shape [N, k] where k is the embedding
dimension, such that U_i is the embedding of user i.
movie_embeddings: A dense Tensor V of shape [M, k] where k is the embedding
dimension, such that V_j is the embedding of movie j.
Returns:
A scalar Tensor representing the MSE between the true ratings and the
model's predictions.
"""
predictions = tf.reduce_sum(
tf.gather(user_embeddings, sparse_ratings.indices[:, 0]) *
tf.gather(movie_embeddings, sparse_ratings.indices[:, 1]),
axis=1)
loss = tf.losses.mean_squared_error(sparse_ratings.values, predictions)
return loss
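# Illustrative note: both implementations of sparse_mean_square_error compute the same MSE over
# the observed entries; the gather-based variant avoids materializing the full [N, M] prediction
# matrix, so it scales better when the number of users and movies is large.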
## [14]
# @title CFModel helper class (run this cell)
class CFModel(object):
"""Simple class that represents a collaborative filtering model"""
def __init__(self, embedding_vars, loss, metrics=None):
"""Initializes a CFModel.
Args:
embedding_vars: A dictionary of tf.Variables.
loss: A float Tensor. The loss to optimize.
metrics: optional list of dictionaries of Tensors. The metrics in each
dictionary will be plotted in a separate figure during training.
"""
self._embedding_vars = embedding_vars
self._loss = loss
self._metrics = metrics
self._embeddings = {k: None for k in embedding_vars}
self._session = None
@property
def embeddings(self):
"""The embeddings dictionary."""
return self._embeddings
def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
optimizer=tf.train.GradientDescentOptimizer):
"""Trains the model.
Args:
      num_iterations: number of iterations to run.
learning_rate: optimizer learning rate.
plot_results: whether to plot the results at the end of training.
optimizer: the optimizer to use. Default to GradientDescentOptimizer.
Returns:
The metrics dictionary evaluated at the last iteration.
"""
with self._loss.graph.as_default():
opt = optimizer(learning_rate)
train_op = opt.minimize(self._loss)
local_init_op = tf.group(
tf.variables_initializer(opt.variables()),
tf.local_variables_initializer())
if self._session is None:
self._session = tf.Session()
with self._session.as_default():
self._session.run(tf.global_variables_initializer())
self._session.run(tf.tables_initializer())
tf.train.start_queue_runners()
with self._session.as_default():
local_init_op.run()
iterations = []
metrics = self._metrics or ({},)
metrics_vals = [collections.defaultdict(list) for _ in self._metrics]
# Train and append results.
for i in range(num_iterations + 1):
_, results = self._session.run((train_op, metrics))
if (i % 10 == 0) or i == num_iterations:
print("\r iteration %d: " % i + ", ".join(
["%s=%f" % (k, v) for r in results for k, v in r.items()]),
end='')
iterations.append(i)
for metric_val, result in zip(metrics_vals, results):
for k, v in result.items():
metric_val[k].append(v)
for k, v in self._embedding_vars.items():
self._embeddings[k] = v.eval()
if plot_results:
# Plot the metrics.
num_subplots = len(metrics)+1
fig = plt.figure()
fig.set_size_inches(num_subplots*10, 8)
for i, metric_vals in enumerate(metrics_vals):
ax = fig.add_subplot(1, num_subplots, i+1)
for k, v in metric_vals.items():
ax.plot(iterations, v, label=k)
ax.set_xlim([1, num_iterations])
ax.legend()
return results
## [15]
#@title Solution
def build_model(ratings, embedding_dim=3, init_stddev=1.):
"""
Args:
ratings: a DataFrame of the ratings
embedding_dim: the dimension of the embedding vectors.
init_stddev: float, the standard deviation of the random initial embeddings.
Returns:
model: a CFModel.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
# Initialize the embeddings using a normal distribution.
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
train_loss = sparse_mean_square_error(A_train, U, V)
test_loss = sparse_mean_square_error(A_test, U, V)
metrics = {
'train_error': train_loss,
'test_error': test_loss
}
embeddings = {
"user_id": U,
"movie_id": V
}
return CFModel(embeddings, train_loss, [metrics])
## [16]
# Build the CF model and train it.
model = build_model(ratings, embedding_dim=30, init_stddev=0.5)
model.train(num_iterations=3000, learning_rate=10.)
## [17]
#@title Solution
DOT = 'dot'
COSINE = 'cosine'
def compute_scores(query_embedding, item_embeddings, measure=DOT):
"""Computes the scores of the candidates given a query.
Args:
query_embedding: a vector of shape [k], representing the query embedding.
item_embeddings: a matrix of shape [N, k], such that row i is the embedding
of item i.
measure: a string specifying the similarity measure to be used. Can be
either DOT or COSINE.
Returns:
scores: a vector of shape [N], such that scores[i] is the score of item i.
"""
u = query_embedding
V = item_embeddings
if measure == COSINE:
V = V / np.linalg.norm(V, axis=1, keepdims=True)
u = u / np.linalg.norm(u)
scores = u.dot(V.T)
return scores
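# Example (illustrative): scores for every movie given one user's embedding:
#   compute_scores(model.embeddings["user_id"][0], model.embeddings["movie_id"], COSINE)
# returns a vector with one similarity score per movie for user 0.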
## [18]
# @title User recommendations and nearest neighbors (run this cell)
def user_recommendations(model, measure=DOT, exclude_rated=False, k=10):
if USER_RATINGS:
scores = compute_scores(
model.embeddings["user_id"][943], model.embeddings["movie_id"], measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'movie_id': movies['movie_id'],
'titles': movies['title'],
'genres': movies['all_genres'],
})
if exclude_rated:
# remove movies that are already rated
rated_movies = ratings[ratings.user_id == "943"]["movie_id"].values
df = df[df.movie_id.apply(lambda movie_id: movie_id not in rated_movies)]
display.display(df.sort_values([score_key], ascending=False).head(k))
def movie_neighbors(model, title_substring, measure=DOT, k=10):
# Search for movie ids that match the given substring.
ids = movies[movies['title'].str.contains(title_substring)].index.values
titles = movies.iloc[ids]['title'].values
if len(titles) == 0:
raise ValueError("Found no movies with title %s" % title_substring)
print("Nearest neighbors of : %s." % titles[0])
if len(titles) > 1:
print("[Found more than one matching movie. Other candidates: {}]".format(
", ".join(titles[1:])))
movie_id = ids[0]
scores = compute_scores(
model.embeddings["movie_id"][movie_id], model.embeddings["movie_id"],
measure)
score_key = measure + ' score'
df = pd.DataFrame({
score_key: list(scores),
'titles': movies['title'],
'genres': movies['all_genres']
})
display.display(df.sort_values([score_key], ascending=False).head(k))
## [19]
movie_neighbors(model, "Toy Story", DOT)
movie_neighbors(model, "Toy Story", COSINE)
## [20]
# @title Embedding Visualization code (run this cell)
def movie_embedding_norm(models):
"""Visualizes the norm and number of ratings of the movie embeddings.
Args:
model: A MFModel object.
"""
if not isinstance(models, list):
models = [models]
df = pd.DataFrame({
'title': movies['title'],
'genre': movies['genre'],
'num_ratings': movies_ratings['rating count'],
})
charts = []
brush = alt.selection_interval()
for i, model in enumerate(models):
norm_key = 'norm'+str(i)
df[norm_key] = np.linalg.norm(model.embeddings["movie_id"], axis=1)
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x='num_ratings',
y=norm_key,
color=alt.condition(brush, alt.value('#4c78a8'), alt.value('lightgray'))
).properties(
selection=nearest).add_selection(brush)
text = alt.Chart().mark_text(align='center', dx=5, dy=-5).encode(
x='num_ratings', y=norm_key,
text=alt.condition(nearest, 'title', alt.value('')))
charts.append(alt.layer(base, text))
return alt.hconcat(*charts, data=df)
def visualize_movie_embeddings(data, x, y):
nearest = alt.selection(
type='single', encodings=['x', 'y'], on='mouseover', nearest=True,
empty='none')
base = alt.Chart().mark_circle().encode(
x=x,
y=y,
color=alt.condition(genre_filter, "genre", alt.value("whitesmoke")),
).properties(
width=600,
height=600,
selection=nearest)
text = alt.Chart().mark_text(align='left', dx=5, dy=-5).encode(
x=x,
y=y,
text=alt.condition(nearest, 'title', alt.value('')))
return alt.hconcat(alt.layer(base, text), genre_chart, data=data)
def tsne_movie_embeddings(model):
"""Visualizes the movie embeddings, projected using t-SNE with Cosine measure.
Args:
model: A MFModel object.
"""
tsne = sklearn.manifold.TSNE(
n_components=2, perplexity=40, metric='cosine', early_exaggeration=10.0,
init='pca', verbose=True, n_iter=400)
print('Running t-SNE...')
V_proj = tsne.fit_transform(model.embeddings["movie_id"])
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
# https://umap-learn.readthedocs.io/en/latest/basic_usage.html#penguin-data
def umap_movie_embeddings(model):
reducer = umap.UMAP()
print('Running UMAP...')
V_proj = reducer.fit_transform(StandardScaler().fit_transform(model.embeddings["movie_id"]))
movies.loc[:,'x'] = V_proj[:, 0]
movies.loc[:,'y'] = V_proj[:, 1]
return visualize_movie_embeddings(movies, 'x', 'y')
## [21]
movie_embedding_norm(model)
## [22]
#@title Solution
model_lowinit = build_model(ratings, embedding_dim=30, init_stddev=0.05)
model_lowinit.train(num_iterations=3000, learning_rate=10.)
movie_neighbors(model_lowinit, "Toy Story", DOT)
movie_neighbors(model_lowinit, "Toy Story", COSINE)
movie_embedding_norm([model, model_lowinit])
## [23]
tsne_movie_embeddings(model_lowinit)
## [24]
umap_movie_embeddings(model_lowinit)
## [25]
# @title Solution
def gravity(U, V):
"""Creates a gravity loss given two embedding matrices."""
return 1. / (U.shape[0].value*V.shape[0].value) * tf.reduce_sum(
tf.matmul(U, U, transpose_a=True) * tf.matmul(V, V, transpose_a=True))
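# Illustrative note: gravity(U, V) equals 1/(N*M) * sum over all (i, j) of <U_i, V_j>^2, computed
# cheaply via the k x k Gram matrices U^T U and V^T V; it nudges predictions for all
# (user, movie) pairs, including unobserved ones, toward zero, complementing the L2 term used in
# build_regularized_model below.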
def build_regularized_model(
ratings, embedding_dim=3, regularization_coeff=.1, gravity_coeff=1.,
init_stddev=0.1):
"""
Args:
ratings: the DataFrame of movie ratings.
embedding_dim: The dimension of the embedding space.
regularization_coeff: The regularization coefficient lambda.
gravity_coeff: The gravity regularization coefficient lambda_g.
Returns:
A CFModel object that uses a regularized loss.
"""
# Split the ratings DataFrame into train and test.
train_ratings, test_ratings = split_dataframe(ratings)
# SparseTensor representation of the train and test datasets.
A_train = build_rating_sparse_tensor(train_ratings)
A_test = build_rating_sparse_tensor(test_ratings)
U = tf.Variable(tf.random_normal(
[A_train.dense_shape[0], embedding_dim], stddev=init_stddev))
V = tf.Variable(tf.random_normal(
[A_train.dense_shape[1], embedding_dim], stddev=init_stddev))
error_train = sparse_mean_square_error(A_train, U, V)
error_test = sparse_mean_square_error(A_test, U, V)
gravity_loss = gravity_coeff * gravity(U, V)
regularization_loss = regularization_coeff * (
tf.reduce_sum(U*U)/U.shape[0].value + tf.reduce_sum(V*V)/V.shape[0].value)
total_loss = error_train + regularization_loss + gravity_loss
losses = {
'train_error_observed': error_train,
'test_error_observed': error_test,
}
loss_components = {
'observed_loss': error_train,
'regularization_loss': regularization_loss,
'gravity_loss': gravity_loss,
}
embeddings = {"user_id": U, "movie_id": V}
return CFModel(embeddings, total_loss, [losses, loss_components])
## [26]
reg_model = build_regularized_model(
ratings, regularization_coeff=0.1, gravity_coeff=1.0, embedding_dim=35,
init_stddev=.05)
reg_model.train(num_iterations=3000, learning_rate=20.)
## [27]
user_recommendations(reg_model, DOT, exclude_rated=True, k=10)
## [27]
movie_neighbors(reg_model, "Toy Story", DOT)
movie_neighbors(reg_model, "Toy Story", COSINE)
## [29]
movie_embedding_norm([model, model_lowinit, reg_model])
## [30]
# Visualize the embeddings
tsne_movie_embeddings(reg_model)
## [31]
umap_movie_embeddings(reg_model)
## [32]
rated_movies = (ratings[["user_id", "movie_id"]]
.groupby("user_id", as_index=False)
.aggregate(lambda x: list(x)))
rated_movies.head()
## [33]
#@title Batch generation code (run this cell)
years_dict = {
movie: year for movie, year in zip(movies["movie_id"], movies["year"])
}
genres_dict = {
movie: genres.split('-')
for movie, genres in zip(movies["movie_id"], movies["all_genres"])
}
def make_batch(ratings, batch_size):
"""Creates a batch of examples.
Args:
ratings: A DataFrame of ratings such that examples["movie_id"] is a list of
movies rated by a user.
batch_size: The batch size.
"""
def pad(x, fill):
return | pd.DataFrame.from_dict(x) | pandas.DataFrame.from_dict |
#Importing the required packages
from flask import Flask, render_template, request
import os
import pandas as pd
from pandas import ExcelFile
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import *
from sklearn.metrics import *
from sklearn.model_selection import cross_val_score
import itertools
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
app = Flask(__name__)
#Routing to initial home page
@app.route('/')
def home():
return render_template('home.html')
@app.route('/admin_login')
def admin_login():
return render_template('admin_login.html')
@app.route('/admin', methods=['GET','POST'])
def admin():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('admin_cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('admin.html')
else:
k=1
return render_template('admin_login.html',k=k)
else:
k=1
return render_template('admin_login.html',k=k)
@app.route('/admin_printed', methods=['GET','POST'])
def admin_printed():
trainfile=request.files['admin_doc']
t=pd.read_excel(trainfile)
t.to_excel('trainfile.xlsx')
return render_template('admin_printed.html')
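# Note (assumption about intent): the admin-uploaded workbook is persisted to 'trainfile.xlsx' and
# re-read by the prediction routes below. DataFrame.to_excel writes the row index as an extra
# leading column unless index=False is passed, so the re-read training file may carry an
# additional unnamed index column.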
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/index', methods=['GET','POST'])
def index():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('index.html')
else:
k=1
return render_template('login.html',k=k)
else:
k=1
return render_template('login.html',k=k)
#Routing to page when File Upload is selected
@app.route('/file_upload')
def file_upload():
return render_template("file_upload.html")
@app.route('/upload_printed', methods=['GET','POST'])
def upload_printed():
abc=request.files['printed_doc']
test1=pd.read_excel(abc)
    test=test1  # note: test is an alias of test1, not a copy; in-place changes to test also affect test1
train=pd.read_excel('trainfile.xlsx')
train['TenurePerJob']=0
for i in range(0,len(train)):
if train.loc[i,'NumCompaniesWorked']>0:
train.loc[i,'TenurePerJob']=train.loc[i,'TotalWorkingYears']/train.loc[i,'NumCompaniesWorked']
a=np.median(train['MonthlyIncome'])
train['CompRatioOverall']=train['MonthlyIncome']/a
full_col_names=train.columns.tolist()
num_col_names=train.select_dtypes(include=[np.int64,np.float64]).columns.tolist()
num_cat_col_names=['Education','JobInvolvement','JobLevel','StockOptionLevel']
target=['Attrition']
num_col_names=list(set(num_col_names)-set(num_cat_col_names))
cat_col_names=list(set(full_col_names)-set(num_col_names)-set(target))
#print("total no of numerical features:",len(num_col_names))
#print("total no of categorical & ordered features:",len(cat_col_names))
cat_train=train[cat_col_names]
num_train=train[num_col_names]
for col in num_col_names:
if num_train[col].skew()>0.80:
num_train[col]=np.log1p(num_train[col])
for col in cat_col_names:
col_dummies=pd.get_dummies(cat_train[col],prefix=col)
cat_train=pd.concat([cat_train,col_dummies],axis=1)
Attrition={'Yes':1,'No':0}
train.Attrition=[Attrition[item] for item in train.Attrition]
cat_train.drop(cat_col_names,axis=1,inplace=True)
final_train=pd.concat([num_train,cat_train],axis=1)
final_train['pr_mean_psh'] = final_train['PerformanceRating'].add(final_train['PercentSalaryHike'])
final_train['pr_mean_psh']=final_train['pr_mean_psh']/2
final_train.drop(labels=['PerformanceRating','PercentSalaryHike'],axis=1,inplace=True)
df1=final_train
for col in list(df1):
df1[col]=df1[col]/df1[col].max()
empnum=test['EmployeeNumber']
test['TenurePerJob']=0
for i in range(0,len(test)):
if test.loc[i,'NumCompaniesWorked']>0:
test.loc[i,'TenurePerJob']=test.loc[i,'TotalWorkingYears']/test.loc[i,'NumCompaniesWorked']
a=np.median(test['MonthlyIncome'])
test['CompRatioOverall']=test['MonthlyIncome']/a
test.drop(labels=['EmployeeNumber'],axis=1,inplace=True)
#test.drop(labels=['EmployeeCount','EmployeeNumber','Over18','StandardHours'],axis=1,inplace=True)
full_col_names=test.columns.tolist()
num_col_names=test.select_dtypes(include=[np.int64,np.float64]).columns.tolist()
num_cat_col_names=['Education','JobInvolvement','JobLevel','StockOptionLevel']
target=['Attrition']
num_col_names=list(set(num_col_names)-set(num_cat_col_names))
cat_col_names=list(set(full_col_names)-set(num_col_names)-set(target))
#print("total no of numerical features:",len(num_col_names))
#print("total no of categorical & ordered features:",len(cat_col_names))
cat_test=test[cat_col_names]
num_test=test[num_col_names]
for col in num_col_names:
if num_test[col].skew()>0.80:
num_test[col]=np.log1p(num_test[col])
for col in cat_col_names:
col_dummies=pd.get_dummies(cat_test[col],prefix=col)
cat_test=pd.concat([cat_test,col_dummies],axis=1)
cat_test.drop(cat_col_names,axis=1,inplace=True)
final_test=pd.concat([num_test,cat_test],axis=1)
final_test['pr_mean_psh'] = final_test['PerformanceRating'].add(final_test['PercentSalaryHike'])
final_test['pr_mean_psh']=final_test['pr_mean_psh']/2
final_test.drop(labels=['PerformanceRating','PercentSalaryHike'],axis=1,inplace=True)
#final_test.drop(labels=['HourlyRate','MonthlyRate','DailyRate'],axis=1,inplace=True)
#final_test.drop(labels=['Gender_Male','Gender_Female'],axis=1,inplace=True)
#final_test.drop(labels=['Department_Human Resources','Department_Research & Development','Department_Sales',],axis=1,inplace=True)
#final_test.drop(labels=['WorkLifeBalance_1','WorkLifeBalance_2','WorkLifeBalance_3','WorkLifeBalance_4','RelationshipSatisfaction_1','RelationshipSatisfaction_2','RelationshipSatisfaction_3','RelationshipSatisfaction_4','JobSatisfaction_1','JobSatisfaction_2','JobSatisfaction_3','JobSatisfaction_4','EnvironmentSatisfaction_1','EnvironmentSatisfaction_2','EnvironmentSatisfaction_3','EnvironmentSatisfaction_4'],axis=1,inplace=True)
df2=final_test
for col in list(df2):
df2[col]=df2[col]/df2[col].max()
#list(df2)
df3=df1[list(df2)]
#if(list(df3)==list(df2)):
#print('y')
#print(list(df2))
X_train=np.asarray(df3)
Y_train=np.asarray(train['Attrition'])
X_test=np.asarray(df2)
test1['EmployeeNumber']=np.asarray(empnum).tolist()
lr=LogisticRegression(solver='liblinear').fit(X_train,Y_train)
yhat=lr.predict(X_test)
yhat.tolist()
test1['Attrition'] = yhat
Attrition={1:'Yes',0:'No'}
test1.Attrition=[Attrition[item] for item in test1.Attrition]
conf=[]
for i in (lr.predict_proba(X_test).tolist()):
i= max(i)
conf.append(i)
#print(len(conf))
for j in range(len(conf)):
conf[j]=conf[j]*100
conf[j] = round(conf[j], 2)
test1['Reliability Percentage'] = conf
#added affecting parameters here
l=np.abs(lr.coef_).tolist()
coefs = [item for sublist in l for item in sublist]
data=np.asarray(df2).tolist()
weights=[]
for row in data:
c=np.multiply(row,coefs).tolist()
weights.append(c)
cols=list(df2)
L=[]
for val in weights:
dic = dict(enumerate(val))
L.append(dic)
ColWeights=[]
for dic in L:
i=0
tempDic={}
for key,value in dic.items():
key=cols[i]
tempDic[key]=value
i=i+1
ColWeights.append(tempDic)
df_yes=test1[test1.Attrition =='Yes']
df_no=test1[test1.Attrition =='No']
for index, row in df_yes.iterrows():
if(row['Attrition']=='Yes'):
yes_changable_cols=['YearsWithCurrManager',
'MonthlyIncome',
'YearsInCurrentRole',
'DistanceFromHome',
'YearsSinceLastPromotion',
'JobLevel_1',
'JobLevel_2',
'JobLevel_3',
'JobLevel_4',
'BusinessTravel_Non-Travel',
'BusinessTravel_Travel_Frequently',
'BusinessTravel_Travel_Rarely',
'OverTime_Yes']
Col_Weights_Yes=[]
for dic in ColWeights:
a={}
for k,v in dic.items():
if k in yes_changable_cols :
a[k]=v
Col_Weights_Yes.append(a)
AscendingCols=[]
for dic in Col_Weights_Yes:
AscendingCols.append((sorted(dic, key=dic.get)))
AllParams=[]
for h in AscendingCols:
params=[ h[12], h[11], h[10], h[9], h[8] ]
AllParams.append(params)
frame=pd.DataFrame(AllParams)
frame.columns =['YesParam_1','YesParam_2','YesParam_3','YesParam_4','YesParam_5']
df_yes=pd.concat([df_yes, frame], axis=1)
df_yes = df_yes[np.isfinite(df_yes['Age'])]
#df_yes=df_yes[df_yes.Age != float('nan')]
#disp=df_yes[df_yes.Attrition=='Yes']
disp=df_yes[['EmployeeNumber','Reliability Percentage','YesParam_1','YesParam_2']]
disp.drop(labels=[],axis=1,inplace=True)
#print(disp.shape)
for index, row in df_no.iterrows():
if(row['Attrition']=='No'):
aff_params_no=['YearsWithCurrManager',
'YearsInCurrentRole',
'MonthlyIncome',
'YearsAtCompany',
'TotalWorkingYears']
#MAIN PARAMS FOR NO
Col_Weights_No=[]
for dic in ColWeights:
b={}
for k,v in dic.items():
if k in aff_params_no :
b[k]=v
Col_Weights_No.append(b)
AscendingCols1=[]
for dic in Col_Weights_No:
AscendingCols1.append((sorted(dic, key=dic.get)))
AllParams1=[]
for h in AscendingCols1:
params1=[ h[4], h[3], h[2], h[1], h[0] ]
AllParams1.append(params1)
frame1=pd.DataFrame(AllParams1)
frame1.columns =['NoParam_1','NoParam_2','NoParam_3','NoParam_4','NoParam_5']
df_no=pd.concat([df_no, frame1], axis=1)
df_no = df_no[np.isfinite(df_no['Age'])]
#df_no=df_no[df_no.Age !=float('nan')]
#disp=test1[test1.Attrition=='Yes']
#disp=disp[['EmployeeNumber','Reliability Percentage','AffectingParam_1','AffectingParam_2']]
#disp.drop(labels=[],axis=1,inplace=True)
#print(disp.shape)
#for index, row in test1.iterrows():
#if(row['Attrition']=='Yes'):
#test1['NoParam_1']=' '
#test1['NoParam_2']=' '
#test1['NoParam_3']=' '
#test1['NoParam_4']=' '
#test1['NoParam_5']=' '
#elif(row['Attrition']=='No'):
#test1['YesParam_1']=' '
#test1['YesParam_2']=' '
#test1['YesParam_3']=' '
#test1['YesParam_4']=' '
#test1['YesParam_5']=' '
writer = pd.ExcelWriter('Result.xlsx', engine='xlsxwriter')
#store your dataframes in a dict, where the key is the sheet name you want
frames = {'Yes_Predictions': df_yes, 'No_predictions': df_no}
#now loop thru and put each on a specific sheet
    for sheet, frame in frames.items():  # use .items() on Python 3
frame.to_excel(writer, sheet_name = sheet)
#critical last step
writer.save()
#test1.to_excel('result.xlsx')
return render_template("upload_printed.html",tables=[disp.to_html(classes='data')], titles=disp.columns.values[-1:])
#Routing to page when Attribute Entry is selected
@app.route('/attribute_entry')
def attribute_entry():
return render_template('attribute_entry.html')
#Obtaining values from attribute entry and processing them
@app.route('/yes', methods=['GET', 'POST'])
def yes():
#Obtaining the values from HTML form
age=int(request.form['age'])
dfh=int(request.form['dfh'])
ncw=int(request.form['ncw'])
twy=int(request.form['twy'])
ylp=int(request.form['ylp'])
yac=int(request.form['yac'])
ycr=int(request.form['ycr'])
ycm=int(request.form['ycm'])
tly=int(request.form['tly'])
shp=int(request.form['shp'])
mi=int(request.form['mi'])
ji=request.form['ji']
jl=request.form['jl']
ot=request.form['ot']
bt=request.form['bt']
jr=request.form['jr']
el=request.form['el']
ms=request.form['ms']
ef=request.form['ef']
sol=request.form['sol']
pr=int(request.form['pr'])
#print(age,'\n',dfh,'\n',ncw,'\n',twy,'\n',ylp,'\n',yac,'\n',ycr,'\n',ycm,'\n',tly,'\n',
#shp,'\n',mi,'\n',ji,'\n',jl,'\n',ot,'\n',bt,'\n',jr,'\n',el,'\n',ms,'\n',ef,'\n',sol,'\n',pr)
#Initializing the one hot encoded columns to 0
ms_S=0
ms_M=0
ms_D=0
ef_HR=0
ef_TD=0
ef_LS=0
ef_Ma=0
ef_Me=0
ef_O=0
jr_HCR=0
jr_HR=0
jr_LT=0
jr_M=0
jr_MD=0
jr_RD=0
jr_RS=0
jr_SE=0
jr_SR=0
bt_NT=0
bt_TF=0
bt_TR=0
ji_1=0
ji_2=0
ji_3=0
ji_4=0
ot_N=0
ot_Y=0
sol_0=0
sol_1=0
sol_2=0
sol_3=0
jl_1=0
jl_2=0
jl_3=0
jl_4=0
jl_5=0
el_1=0
el_2=0
el_3=0
el_4=0
el_5=0
#Setting the value obtained from form to 1
if(ms=="1"):
ms_S=1
elif(ms=="2"):
ms_M=1
else:
ms_D=1
if(ef=="1"):
ef_HR=1
elif(ef=="2"):
ef_TD=1
elif(ef=="3"):
ef_LS=1
elif(ef=="4"):
ef_Ma=1
elif(ef=="5"):
ef_Me=1
else:
ef_O=1
if(jr=="1"):
jr_HCR=1
elif(jr=="2"):
jr_HR=1
elif(jr=="3"):
jr_LT=1
elif(jr=="4"):
jr_M=1
elif(jr=="5"):
jr_MD=1
elif(jr=="6"):
jr_RD=1
elif(jr=="7"):
jr_RS=1
elif(jr=="8"):
jr_SE=1
else:
jr_SR=1
if(bt=="0"):
bt_NT=1
elif(bt=="1"):
bt_TR=1
else:
bt_TF=1
if(ji=="1"):
ji_1=1
elif(ji=="2"):
ji_2=1
elif(ji=="3"):
ji_3=1
else:
ji_4=1
if(ot=="1"):
ot_Y=1
else:
ot_N=1
if(sol=="0"):
sol_0=1
elif(sol=="1"):
sol_1=1
elif(sol=="2"):
sol_2=1
else:
sol_3=1
if(jl=="1"):
jl_1=1
elif(jl=="2"):
jl_2=1
elif(jl=="3"):
jl_3=1
elif(jl=="4"):
jl_4=1
else:
jl_5=1
if(el=="1"):
el_1=1
elif(el=="2"):
el_2=1
elif(el=="3"):
el_3=1
elif(el=="4"):
el_4=1
else:
el_5=1
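# Illustrative alternative (not part of the original app): the same one-hot
# flags could be built from small lookup tables instead of long if/elif
# chains, e.g. for marital status:
#
#     flags = {"ms_S": 0, "ms_M": 0, "ms_D": 0}
#     flags[{"1": "ms_S", "2": "ms_M"}.get(ms, "ms_D")] = 1
#
# The original chains above are kept unchanged.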
#Training the data
train=pd.read_excel('trainfile.xlsx')
train['TenurePerJob']=0
for i in range(0,len(train)):
if train.loc[i,'NumCompaniesWorked']>0:
train.loc[i,'TenurePerJob']=train.loc[i,'TotalWorkingYears']/train.loc[i,'NumCompaniesWorked']
a=np.median(train['MonthlyIncome'])
train['CompRatioOverall']=train['MonthlyIncome']/a
tpj=0
if(ncw>0):
tpj=twy/ncw
cro=mi/a
pmp=(pr+shp)/2
#train.drop(labels=['EmployeeCount','EmployeeNumber','Over18','StandardHours'],axis=1,inplace=True)
full_col_names=train.columns.tolist()
num_col_names=train.select_dtypes(include=[np.int64,np.float64]).columns.tolist()
num_cat_col_names=['Education','JobInvolvement','JobLevel','StockOptionLevel']
target=['Attrition']
num_col_names=list(set(num_col_names)-set(num_cat_col_names))
cat_col_names=list(set(full_col_names)-set(num_col_names)-set(target))
#print("total no of numerical features:",len(num_col_names))
#print("total no of categorical & ordered features:",len(cat_col_names))
cat_train=train[cat_col_names]
num_train=train[num_col_names]
for col in num_col_names:
if num_train[col].skew()>0.80:
num_train[col]=np.log1p(num_train[col])
for col in cat_col_names:
col_dummies=pd.get_dummies(cat_train[col],prefix=col)
cat_train= | pd.concat([cat_train,col_dummies],axis=1) | pandas.concat |
import pandas as pd
from pandas_profiling.config import config
from pandas_profiling.report.presentation.frequency_table_utils import freq_table
from pandas_profiling.visualisation.plot import histogram
from pandas_profiling.report.presentation.core import (
Image,
FrequencyTable,
FrequencyTableSmall,
Sequence,
Table,
VariableInfo,
)
from pandas_profiling.report.structure.variables.render_common import render_common
def render_categorical(summary):
varid = summary["varid"]
n_obs_cat = config["vars"]["cat"]["n_obs"].get(int)
image_format = config["plot"]["image_format"].get(str)
template_variables = render_common(summary)
# TODO: merge with boolean
mini_freq_table_rows = freq_table(
freqtable=summary["value_counts"],
n=summary["count"],
max_number_to_print=n_obs_cat,
)
# Top
# Element composition
info = VariableInfo(
summary["varid"], summary["varname"], "Categorical", summary["warnings"]
)
table = Table(
[
{
"name": "Distinct count",
"value": summary["n_unique"],
"fmt": "fmt",
"alert": "n_unique" in summary["warn_fields"],
},
{
"name": "Unique (%)",
"value": summary["p_unique"],
"fmt": "fmt_percent",
"alert": "p_unique" in summary["warn_fields"],
},
{
"name": "Missing",
"value": summary["n_missing"],
"fmt": "fmt",
"alert": "n_missing" in summary["warn_fields"],
},
{
"name": "Missing (%)",
"value": summary["p_missing"],
"fmt": "fmt_percent",
"alert": "p_missing" in summary["warn_fields"],
},
{
"name": "Memory size",
"value": summary["memory_size"],
"fmt": "fmt_bytesize",
"alert": False,
},
]
)
fqm = FrequencyTableSmall(mini_freq_table_rows)
# TODO: settings 3,3,6
template_variables["top"] = Sequence([info, table, fqm], sequence_type="grid")
# Bottom
items = []
frequency_table = FrequencyTable(
template_variables["freq_table_rows"],
name="Common Values",
anchor_id=f"{varid}common_values",
)
items.append(frequency_table)
check_compositions = config["vars"]["cat"]["check_composition"].get(bool)
if check_compositions:
length_table = Table(
[
{
"name": "Max length",
"value": summary["max_length"],
"fmt": "fmt_numeric",
"alert": False,
},
{
"name": "Mean length",
"value": summary["mean_length"],
"fmt": "fmt_numeric",
"alert": False,
},
{
"name": "Min length",
"value": summary["min_length"],
"fmt": "fmt_numeric",
"alert": False,
},
],
name="Length",
anchor_id=f"{varid}lengthstats",
)
histogram_bins = 10
length = Image(
histogram(summary["length"], summary, histogram_bins),
image_format=image_format,
alt="Scatter",
name="Length",
anchor_id=f"{varid}length",
)
tbl = Sequence(
[length, length_table],
anchor_id=f"{varid}tbl",
name="Length",
sequence_type="grid",
)
items.append(tbl)
n_freq_table_max = config["n_freq_table_max"].get(int)
citems = []
vc = | pd.Series(summary["category_alias_values"]) | pandas.Series |
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_features.ipynb (unless otherwise specified).
__all__ = ['read_tsv', 'gzip_reading', 'school_plan__features', 'translate_latlng', 'kdtree_neighbors',
'train_plan__latlng', 'train_plan__nbusers', 'train_time_features', 'census_income_median',
'census_income_avg', 'gini_processing', 'gini', 'census_sentiment_analy', 'convert_sent2score',
'transform_churn_series', 'classifier']
# Cell
import pandas as pd
import numpy as np
import os
from zipfile import ZipFile
from scipy import spatial
import matplotlib.pyplot as plt
from tsfresh import extract_features
from tsfresh.feature_selection.relevance import calculate_relevance_table
import tsfresh
# Cell
def read_tsv(file:str)->pd.DataFrame:
return pd.read_csv(file, compression='gzip', sep='\t')
def gzip_reading(gzip_file)->dict:
'Read all tsv.gz files in the zip file and return a dictionary (key: filename, value: data)'
archive = ZipFile(gzip_file, 'r')
files = {name: archive.open(name) for name in archive.namelist() if
(name.endswith('.gz') and not name.startswith('_'))}
files_names = [i.split('.')[0] for i in files.keys()]
# reading the designated files into dict
dt = {}
for name, key in zip(files_names, files.keys()):
dt[name] = read_tsv(files[key])
return dt
def school_plan__features(data:pd.DataFrame)->pd.DataFrame:
'Calculate the number of schools (and their associated categories) within each planning area'
school_count = data.groupby('planning_area', as_index=False).size().rename({'size':'number_school'}, axis=1)
school_cat_count = (data
.groupby(['planning_area','category'], as_index=False).size()
.rename({'size':'number_school'}, axis=1)
.pivot(index='planning_area', columns='category', values='number_school').fillna(0)
).reset_index()
return school_count.merge(school_cat_count)
def translate_latlng(input:list)->list:
'Translate the lat/lng strings into (lat, lng) float tuples, used to identify the nearest neighbor numerically'
latlong_location_str = [i.replace(" ","").replace("\"", "").split(",") for i in input]
latlong_location_num = [(float(x), float(y)) for x, y in latlong_location_str]
return latlong_location_num
def kdtree_neighbors(reference:list, query_data:list)->list:
'Identify the nearest neighbor in *reference* [list of (lat, lng)] for each point in *query_data* [list of (lat, lng)], returning the matching reference indices'
tree = spatial.KDTree(reference)
return tree.query(query_data)[1]
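# --- Illustrative usage (not part of the original notebook export) ----------
# A minimal sketch of how the two helpers above combine: translate the raw
# '"lat, lng"' strings into float tuples, then match each query point to its
# nearest reference point. The coordinates below are made up.
if __name__ == "__main__":
    _reference = translate_latlng(['"1.3000, 103.8000"', '"1.3500, 103.9000"'])
    _queries = translate_latlng(['"1.3100, 103.8100"'])
    print(kdtree_neighbors(_reference, _queries))  # nearest reference index per query -> [0]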
def train_plan__latlng(data:pd.DataFrame)->pd.DataFrame:
list_tuple_latlng = translate_latlng(data['latlong'])
data['lat'] = [i[0] for i in list_tuple_latlng]
data['lng'] = [i[1] for i in list_tuple_latlng]
return data.groupby('planning_area', as_index=False)[['lat','lng']].median()
def train_plan__nbusers(data:pd.DataFrame)->pd.DataFrame:
return data.groupby('planning_area', as_index=False).size().rename({'size':'users_nb'}, axis=1)
def train_time_features(data:pd.DataFrame):
'Modify the train dataset inplace to generate time features (*month_delta* and *account start year*)'
data['account_start_date'] = | pd.to_datetime(data['account_start_date']) | pandas.to_datetime |
import pandas as pd
from matplotlib import pyplot as plt  # needed by Plots.test_plot below
# import matplotlib.dates as md
import datetime
import glob
from io import BytesIO
import base64
from pymongo import MongoClient
class Plots():
def test_plot():
plt.plot([1,2,3,4,5,6,7,8,9])
plt.rcParams["figure.figsize"] = (10,5)
buf = BytesIO()
plt.savefig(buf, format='png')
plt.close()
buf.seek(0)
buffer = b''.join(buf)
buffer = base64.b64encode(buffer)
buffer = buffer.decode('utf-8')
return buffer
def get_medias(dias=8):
# ano = '*' if ano=='' or ano==None else ano
# mes = '*' if mes=='' or mes==None else f'{int(mes):02d}'
# dia = '*' if dia=='' or dia==None else f'{int(dia):02d}'
# data = f'{dia}/{mes}/{ano}'
# arquivos = glob.glob(f'./Data/{ano}_{mes}_{dia}.txt')
arquivos = glob.glob(f'./Data/*.txt')
arquivos.sort()
arquivos = arquivos[-dias:]
df_week = pd.DataFrame(columns=['Data','Media_Pressao','Media_Temperatura','Media_Umidade'])
for arquivo in arquivos:
df = pd.read_csv(arquivo, sep=';')
dados = {
'Data':df.loc[1,'Data'],
'Media_Pressao':df['Pressao'].mean(),
'Media_Umidade':df['Umidade'].mean(),
'Media_Temperatura':df['Temperatura2'].mean()
}
df_week = df_week.append([dados], ignore_index=True)
df_week['Data'] = df_week['Data'].astype('datetime64[ns]')
data = df_week.loc[1,'Data'].strftime("%d/%m/%Y")
hora = df_week['Data'].dt.strftime('%d/%m')
return hora.to_list(), df_week['Media_Pressao'].to_list(), df_week['Media_Umidade'].to_list(), df_week['Media_Temperatura'].to_list(), data
def get_evolucaop(ano='*', mes='*', dia='*'):
buf = BytesIO()
mes = f'{int(mes):02d}'
dia = f'{int(dia):02d}'
arquivo = glob.glob(f'./Data/{ano}_{mes}_{dia}.txt')[-1]
df = | pd.read_csv(arquivo, sep=';') | pandas.read_csv |
"""Utility functions for logging operations."""
__author__ = "<NAME>"
import logging
import warnings
import pandas as pd
from pathlib import Path
from typing import Union
def remove_inner_brackets(message: str) -> str:
"""Remove the inner brackets i.e., [ or ], from a string, outer brackets are kept.
Parameters
----------
message: str
The string to remove the inner brackets from.
Returns
-------
str:
A new message without any inner brackets.
"""
level = 0
new_message = ""
for char in message:
if char == "[":
if level == 0:
new_message += char
level += 1
elif char == "]":
if level == 1:
new_message += char
level -= 1
else:
new_message += char
assert level >= 0
return new_message
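# Illustrative check (not part of the original module): only the outermost
# bracket pair survives, e.g.
#
#     >>> remove_inner_brackets("[warn [cat A] and [cat B]]")
#     '[warn cat A and cat B]'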
def delete_logging_handlers(logger: logging.Logger):
"""Delete all logging handlers that are not stream-handlers.
Parameters
----------
logger : logging.Logger
The logger.
"""
if len(logger.handlers) > 1:
logger.handlers = [
h for h in logger.handlers if type(h) == logging.StreamHandler
]
assert len(logger.handlers) == 1, "Multiple logging StreamHandlers present!!"
def add_logging_handler(
logger: logging.Logger, logging_file_path: Union[str, Path]
) -> logging.FileHandler:
"""Add a logging file-handler to the logger.
Parameters
----------
logger : logging.Logger
The logger.
logging_file_path : Union[str, Path]
The file path for the file handler.
Returns
-------
logging.FileHandler
The file-handler that is added to the given logger.
"""
if not isinstance(logging_file_path, Path):
logging_file_path = Path(logging_file_path)
if logging_file_path.exists():
warnings.warn(
f"Logging file ({logging_file_path}) already exists. "
f"This file will be overwritten!",
RuntimeWarning,
)
# Clear the file
# -> because same FileHandler is used when calling this method twice
open(logging_file_path, "w").close()
f_handler = logging.FileHandler(logging_file_path, mode="w")
f_handler.setFormatter(
logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
)
f_handler.setLevel(logging.INFO)
logger.addHandler(f_handler)
return f_handler
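# Illustrative usage (not part of the original module): a typical flow is to
# grab a logger, attach a file handler with the helper above, log as usual,
# and later parse the file back into a DataFrame with logging_file_to_df
# (defined below). The file name is a placeholder.
#
#     logger = logging.getLogger("my_pipeline")
#     logger.setLevel(logging.INFO)
#     add_logging_handler(logger, Path("run.log"))
#     logger.info("started")
#     log_df = logging_file_to_df("run.log")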
def logging_file_to_df(logging_file_path: str) -> pd.DataFrame:
"""Parse the logged messages into a dataframe.
Parameters
----------
logging_file_path: str
The file path where the logged messages are stored.
Returns
-------
pd.DataFrame
A DataFrame containing the log_time, name, log_level and log message.
"""
column_names = ["log_time", "name", "log_level", "message"]
data = {col: [] for col in column_names}
with open(logging_file_path, "r") as f:
for line in f:
line = line.split(" - ")
for idx, col in enumerate(column_names):
data[col].append(line[idx].strip())
df = pd.DataFrame(data)
df["log_time"] = | pd.to_datetime(df["log_time"]) | pandas.to_datetime |
import os
import json
def format_ts(ts):
return ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
def get_sim_folder_path():
#return '/Users/ngoh511/Documents/projects/PycharmProjects/volttron_ep_toolkit/dashboard/src/simulations'
return '/home/vuser/volttron/simulations/'
def get_sim_file_path(bldg, sim, baseline=False):
file = "baseline_eplusout.sql" if baseline else "eplusout.sql"
return os.path.join(get_sim_folder_path(), bldg, sim, file)
def get_power_file_path(bldg, sim):
return os.path.join(get_sim_folder_path(), bldg, sim, 'tccpower.csv')
def get_baseline_file_path(bldg, sim):
return os.path.join(get_sim_folder_path(), bldg, sim, 'tccpower_baseline.csv')
def get_ilc_config_path(bldg, sim):
name = ''
path = ''
if 'small_office' in bldg:
name = 'so_ilc_config'
elif 'medium_office' in bldg:
name = 'mo_ilc_config'
elif 'large_office' in bldg:
name = 'lo_ilc_config'
elif 'building1' in bldg:
name = 'b1_ilc_config'
if name != '':
path = os.path.join(get_sim_folder_path(), bldg, sim, name)
return path
def get_tcc_fd_config_path(bldg, sim):
name = 'meter-config-fixed-demand'
path = os.path.join(get_sim_folder_path(), bldg, sim, name)
return path
def get_tcc_config_path(bldg, sim, zone):
return os.path.join(get_sim_folder_path(), bldg, sim, zone+'-config')
def get_demand_limit(bldg, sim):
demand_limit = -9999
if not 'tcc_fp' in bldg:
try:
path = get_ilc_config_path(bldg, sim)
point = 'demand_limit'
if 'tcc_fd' in bldg: # fixed_demand
path = get_tcc_fd_config_path(bldg, sim)
point = 'demand_limit_threshold'
if os.path.isfile(path):
with open(path, 'r') as fh:
config = json.load(fh)
if point in config:
demand_limit = float(config[point])/1000.0
except Exception as e:
print(e)
return demand_limit
def get_tcc_comfort(bldg, sim, zone):
low_limit = -9999
high_limit = -9999
path = get_tcc_config_path(bldg, sim, zone)
try:
if os.path.isfile(path):
with open(path, 'r') as fh:
config = json.load(fh)
low_limit = float(config['tMin'])*1.8+32
high_limit = float(config['tMax'])*1.8+32
except Exception as e:
print(e)
return low_limit, high_limit
def get_sim_data(bldg, sim, query, baseline=False, year=2000):
import sqlite3
import pandas as pd
import traceback
df = None
sim_path = get_sim_file_path(bldg, sim, baseline=baseline)
if os.path.isfile(sim_path):
try:
conn = sqlite3.connect(sim_path)
df = pd.read_sql_query(query, conn)
df = add_ts_col(df, year)
except Exception as e:
traceback.print_exc()
finally:
conn.close()
return df
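# Illustrative call (not part of the original module). The query below is only
# a placeholder -- the exact SQL depends on the EnergyPlus eplusout.sql schema;
# get_sim_data just hands it to pandas.read_sql_query, and add_ts_col (below)
# then expects Month/Day/Hour/Minute columns in the result.
#
#     query = "SELECT Month, Day, Hour, Minute, Value FROM ..."  # placeholder
#     df = get_sim_data("building1", "sim1", query, baseline=False, year=2020)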
def add_ts_col(df, year=None):
import pandas as pd
import pytz
import datetime
local_tz = pytz.timezone('US/Pacific')
if year is None:
year = datetime.datetime.utcnow().year
df['Year'] = year
df['ts'] = | pd.to_datetime(df[['Year', 'Month', 'Day', 'Hour', 'Minute']]) | pandas.to_datetime |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context, Categorical)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals():
def test_cast_internals(self, float_frame):
casted = DataFrame(float_frame._data, dtype=int)
expected = DataFrame(float_frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(float_frame._data, dtype=np.int32)
expected = DataFrame(float_frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self, float_frame):
float_frame['E'] = 7.
consolidated = float_frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
float_frame['F'] = 8.
assert len(float_frame._data.blocks) == 3
float_frame._consolidate(inplace=True)
assert len(float_frame._data.blocks) == 1
def test_consolidate_inplace(self, float_frame):
frame = float_frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
float_frame[chr(letter)] = chr(letter)
def test_values_consolidate(self, float_frame):
float_frame['E'] = 7.
assert not float_frame._data.is_consolidated()
_ = float_frame.values # noqa
assert float_frame._data.is_consolidated()
def test_modify_values(self, float_frame):
float_frame.values[5] = 5
assert (float_frame.values[5] == 5).all()
# unconsolidated
float_frame['E'] = 7.
float_frame.values[6] = 6
assert (float_frame.values[6] == 6).all()
def test_boolean_set_uncons(self, float_frame):
float_frame['E'] = 7.
expected = float_frame.values.copy()
expected[expected > 1] = 2
float_frame[float_frame > 1] = 2
assert_almost_equal(expected, float_frame.values)
def test_values_numeric_cols(self, float_frame):
float_frame['foo'] = 'bar'
values = float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self, mixed_float_frame, mixed_int_frame):
# mixed lcd
values = mixed_float_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_float_frame[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = mixed_float_frame[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = mixed_int_frame[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = mixed_int_frame[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = mixed_int_frame[['B', 'C']].values
assert values.dtype == np.uint64
values = mixed_int_frame[['A', 'C']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C', 'D']].values
assert values.dtype == np.int64
values = mixed_int_frame[['A']].values
assert values.dtype == np.int32
values = mixed_int_frame[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A': [2 ** 63 - 1]})
result = df['A']
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2 ** 63]})
result = df['A']
expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
"""Failure analysis of national-scale networks
For transport modes at national scale:
- rail
- Road can be analysed as well
Input data requirements
-----------------------
1. Correct paths to all files and correct input parameters
2. csv sheets with results of flow mapping based on MIN-MAX generalised costs estimates:
- origin - String node ID of Origin
- destination - String node ID of Destination
- origin_province - String name of Province of Origin node ID
- destination_province - String name of Province of Destination node ID
- min_edge_path - List of string of edge ID's for paths with minimum generalised cost flows
- max_edge_path - List of string of edge ID's for paths with maximum generalised cost flows
- min_distance - Float values of estimated distance for paths with minimum generalised cost flows
- max_distance - Float values of estimated distance for paths with maximum generalised cost flows
- min_time - Float values of estimated time for paths with minimum generalised cost flows
- max_time - Float values of estimated time for paths with maximum generalised cost flows
- min_gcost - Float values of estimated generalised cost for paths with minimum generalised cost flows
- max_gcost - Float values of estimated generalised cost for paths with maximum generalised cost flows
- industry_columns - All daily tonnages of industry columns given in the OD matrix data
3. Shapefiles
- edge_id - String/Integer/Float Edge ID
- geometry - Shapely LineString geomtry of edges
Results
-------
Csv sheets with results of failure analysis:
1. All failure scenarios
- edge_id - String name or list of failed edges
- origin - String node ID of Origin of disrupted OD flow
- destination - String node ID of Destination of disrupted OD flow
- origin_province - String name of Province of Origin node ID of disrupted OD flow
- destination_province - String name of Province of Destination node ID of disrupted OD flow
- no_access - Boolean 1 (no rerouting) or 0 (rerouting)
- min/max_distance - Float value of estimated distance of OD journey before disruption
- min/max_time - Float value of estimated time of OD journey before disruption
- min/max_gcost - Float value of estimated travel cost of OD journey before disruption
- new_cost - Float value of estimated cost of OD journey after disruption
- new_distance - Float value of estimated distance of OD journey after disruption
- new_path - List of string edge ID's of estimated new route of OD journey after disruption
- new_time - Float value of estimated time of OD journey after disruption
- dist_diff - Float value of post-disruption minus pre-disruption distance
- time_diff - Float value of post-disruption minus pre-disruption time
- min/max_tr_loss - Float value of estimated change in rerouting cost
- min/max_tons - Float values of total daily tonnages along disrupted OD pairs
- industry_columns - Float values of all daily tonnages of industry columns along disrupted OD pairs
2. Isolated OD scenarios - OD flows with no rerouting options
- edge_id - String name or list of failed edges
- origin_province - String name of Province of Origin node ID of disrupted OD flow
- destination_province - String name of Province of Destination node ID of disrupted OD flow
- industry_columns - Float values of all daily tonnages of industry columns along disrupted OD pairs
- min/max_tons - Float values of total daily tonnages along disrupted OD pairs
3. Rerouting scenarios - OD flows with rerouting options
- edge_id - String name or list of failed edges
- origin_province - String name of Province of Origin node ID of disrupted OD flow
- destination_province - String name of Province of Destination node ID of disrupted OD flow
- min/max_tr_loss - Float value of change in rerouting cost
- min/max_tons - Float values of total daily tonnages along disrupted OD pairs
4. Min-max combined scenarios - Combined min-max results along each edge
- edge_id - String name or list of failed edges
- no_access - Boolean 1 (no rerouting) or 0 (rerouting)
- min/max_tr_loss - Float values of change in rerouting cost
- min/max_tons - Float values of total daily tonnages affected by the disrupted edge
"""
import ast
import copy
import csv
import itertools
import math
import operator
import os
import sys
import igraph as ig
import networkx as nx
import numpy as np
import pandas as pd
from atra.utils import *
from atra.transport_flow_and_failure_functions import *
def main():
"""Estimate failures
Specify the paths from where you want to read and write:
1. Input data
2. Intermediate calcuations data
3. Output results
Supply input data and parameters
1. Names of modes
String
2. Names of min-max tons columns in sector data
List of string types
3. Min-max names of different types of attributes - paths, distance, time, cost, tons
List of string types
4. Names of commodity/industry columns for which min-max tonnage column names already exist
List of string types
5. Percentage of OD flows that are assumed disrupted
List of float type
6. Condition on whether analysis is single failure or multiple failure
Boolean condition True or False
Give the paths to the input data files:
1. Network edges csv and shapefiles
2. OD flows csv file
3. Failure scenarios csv file
Specify the output files and paths to be created
"""
data_path, calc_path, output_path = load_config()['paths']['data'], load_config()[
'paths']['calc'], load_config()['paths']['output']
# Supply input data and parameters
modes = [
{
'sector':'rail',
'min_tons_column':'min_total_tons',
'max_tons_column':'max_total_tons',
}
]
types = ['min', 'max']
path_types = ['min_edge_path', 'max_edge_path']
dist_types = ['min_distance', 'max_distance']
time_types = ['min_time', 'max_time']
cost_types = ['min_gcost', 'max_gcost']
index_cols = ['origin_id', 'destination_id', 'origin_province', 'destination_province']
percentage = [100.0]
single_edge = True
# Give the paths to the input data files
network_data_path = os.path.join(data_path,'network')
flow_paths_data = os.path.join(output_path, 'flow_mapping_paths')
fail_scenarios_data = os.path.join(
output_path, 'hazard_scenarios')
# Specify the output files and paths to be created
shp_output_path = os.path.join(output_path, 'failure_shapefiles')
if os.path.exists(shp_output_path) == False:
os.mkdir(shp_output_path)
fail_output_path = os.path.join(output_path, 'failure_results')
if os.path.exists(fail_output_path) == False:
os.mkdir(fail_output_path)
all_fail_scenarios = os.path.join(fail_output_path,'all_fail_scenarios')
if os.path.exists(all_fail_scenarios) == False:
os.mkdir(all_fail_scenarios)
isolated_ods = os.path.join(fail_output_path,'isolated_od_scenarios')
if os.path.exists(isolated_ods) == False:
os.mkdir(isolated_ods)
isolated_ods = os.path.join(fail_output_path,'isolated_od_scenarios','multi_mode')
if os.path.exists(isolated_ods) == False:
os.mkdir(isolated_ods)
rerouting = os.path.join(fail_output_path,'rerouting_scenarios')
if os.path.exists(rerouting) == False:
os.mkdir(rerouting)
minmax_combine = os.path.join(fail_output_path,'minmax_combined_scenarios')
if os.path.exists(minmax_combine) == False:
os.mkdir(minmax_combine)
# Create the multi-modal networks
print ('* Creating multi-modal networks')
mds = ['road', 'rail', 'port', 'multi']
G_multi_df = []
for m in range(len(mds)):
# Load mode igraph network and GeoDataFrame
print ('* Loading {} igraph network and GeoDataFrame'.format(mds[m]))
G_df = pd.read_csv(os.path.join(network_data_path,'{}_edges.csv'.format(mds[m])),encoding='utf-8-sig').fillna(0)
if mds[m] == 'rail':
e_flow = pd.read_csv(os.path.join(output_path,'flow_mapping_combined','weighted_flows_{}_100_percent.csv'.format(mds[m])))[['edge_id','max_total_tons']]
G_df = pd.merge(G_df,e_flow[['edge_id','max_total_tons']],how='left',on=['edge_id'])
G_df = G_df[G_df['max_total_tons'] > 0]
elif mds[m] == 'multi':
G_df = G_df[G_df['operation_state'] == 'operational']
G_multi_df.append(G_df)
G_multi_df = pd.concat(G_multi_df, axis=0, sort=False, ignore_index=True)
cols = [c for c in G_multi_df.columns.values.tolist() if c not in ['from_node','to_node']]
G_multi_df = G_multi_df[['from_node', 'to_node'] + cols]
for m in range(len(modes)):
# Create failure scenarios
print ('* Creating {} failure scenarios'.format(modes[m]['sector']))
fail_df = pd.read_csv(os.path.join(
fail_scenarios_data,
'{}_hazard_intersections.csv'.format(modes[m]['sector'])))
ef_sc_list = edge_failure_sampling(fail_df,'edge_id')
print ('Number of failure scenarios',len(ef_sc_list))
for perct in percentage:
# Load flow paths
print ('* Loading {} flow paths'.format(modes[m]['sector']))
flow_df = pd.read_csv(os.path.join(flow_paths_data,'flow_paths_{}_{}_percent_assignment.csv'.format(modes[m]['sector'],int(perct))),encoding='utf-8')
if modes[m]['sector'] == 'road':
e_flow = pd.read_csv(os.path.join(output_path,'flow_mapping_combined','weighted_flows_{}_{}_percent.csv'.format(modes[m]['sector'],int(perct))))[['edge_id','max_total_tons']]
ef_df = pd.DataFrame(ef_sc_list,columns=['edge_id'])
ef_df = pd.merge(ef_df,G_multi_df[['edge_id','road_type']],how='left',on=['edge_id']).fillna(0)
ef_df = | pd.merge(ef_df,e_flow,how='left',on=['edge_id']) | pandas.merge |
"""Metric Functions.
"""
import numpy as np
import pandas as pd
import statsmodels.api as sm
import itertools as it
import scipy.stats as st
from sklearn.preprocessing import PolynomialFeatures as pnf
__all__ = ['deviation',
'vif',
'mean_absolute_percentage_error',
'average_absolute_deviation',
'median_absolute_deviation',
'calculate_interaction']
def deviation(container, method='mean', if_abs=True):
"""Deviation.
"""
if method == 'mean':
center = np.nanmean(container)
elif method == 'median':
center = np.nanmedian(container)
resIter = map(lambda x: x - center, container)
if if_abs:
resIter = map(np.absolute, resIter)
res = np.fromiter(resIter, dtype=float)  # np.float is removed in recent NumPy; plain float is equivalent
return res
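# Illustrative check (not part of the original module): absolute deviations of
# [1, 2, 3, 6] from its mean (3.0) and median (2.5).
if __name__ == "__main__":
    print(deviation([1, 2, 3, 6]))            # -> [2. 1. 0. 3.]
    print(deviation([1, 2, 3, 6], 'median'))  # -> [1.5 0.5 0.5 3.5]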
def vif(y, X):
"""Variance inflation factor.
"""
assert isinstance(y, pd.Series)
assert isinstance(X, pd.DataFrame)
# Change input to array
y_arr = y.values
X_arr = X.values
# Calculate a linear regression(Ordinary Least Square)
reg = sm.add_constant(X_arr)
est = sm.OLS(y_arr, reg).fit()
# Get a R-square
rsq = est.rsquared
# Get a VIF
vif = 1 / (1 - rsq)
return vif
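# Illustrative usage (not part of the original module): the conventional way to
# use this helper is to regress one predictor on the remaining ones; values far
# above 1 point to multicollinearity. The column names are made up.
#
#     X = pd.DataFrame(np.random.rand(200, 3), columns=["a", "b", "c"])
#     vif_a = vif(X["a"], X[["b", "c"]])  # close to 1 for independent columns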
def mean_absolute_percentage_error(measure, predict, thresh=3.0):
'''Mean Absolute Percentage Error.
It expresses the average absolute prediction error as a percentage of the
measured values.
It measures the prediction accuracy of a forecasting method in Statistics
with the real measured values and the predicted values, for example in trend
estimation.
If MAPE is 5, the predictions deviate from the measurements by 5% on average.
It cannot be used if there are zero values,
because there would be a division by zero.
'''
mape = np.mean(np.absolute((measure - predict) / measure)) * 100
return mape
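# Worked example (illustrative, not part of the original module):
# measure = np.array([100., 200.]), predict = np.array([110., 190.])
# gives absolute percentage errors of 10% and 5%, so the MAPE is 7.5.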
def average_absolute_deviation(measure, predict, thresh=2):
'''Average Absolute Deviation.
It is the mean of the absolute errors between the measured and the predicted
values.
It measures the prediction accuracy of a forecasting method in Statistics
with the real measured values and the predicted values, for example in trend
estimation.
If the average absolute deviation is 5, the predictions are off by 5 units on
average.
'''
aad = np.mean(np.absolute(measure - predict))
return aad
def median_absolute_deviation(measure, predict, thresh=2):
'''Median Absolute Deviation.
It is the median of the absolute errors between the measured and the predicted
values.
It measures the prediction accuracy of a forecasting method in Statistics
with the real measured values and the predicted values, for example in trend
estimation.
If MAD is 5, half of the predictions are within 5 units of the measured values.
'''
mad = np.median(np.absolute(measure - predict))
return mad
def calculate_interaction(rankTbl, pvTbl, target, ranknum=10):
"""Feature interaction calculation.
"""
rankTop = rankTbl[:ranknum]
interPvt = pvTbl[rankTop['var_name']]
interAct = pnf(degree=2, interaction_only=True)
interTbl = pd.DataFrame(interAct.fit_transform(interPvt),
index=interPvt.index).iloc[:, 1:]
rankTop_col = list(rankTop['var_name'])
interAct_col = list(map(' xx '.join,
list(it.combinations(rankTop['var_name'], 2))))
interTbl.columns = rankTop_col + interAct_col
# Generate a Result Table
col = ['slope', 'intercept', 'corr_coef', 'p_value', 'std_err']
ind = interTbl.columns
regMatrix = | pd.DataFrame(index=ind, columns=col) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 3 15:17:10 2017
@author: zeinabhakimi
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import lil_matrix
from sklearn.svm import SVC
train = | pd.read_csv('result_train.csv') | pandas.read_csv |
from __future__ import division
import numpy as np
import os.path
import sys
import pandas as pd
from base.uber_model import UberModel, ModelSharedInputs
from .therps_functions import TherpsFunctions
import time
from functools import wraps
def timefn(fn):
@wraps(fn)
def measure_time(*args, **kwargs):
t1 = time.time()
result = fn(*args, **kwargs)
t2 = time.time()
print("therps_model_rest.py@timefn: " + fn.func_name + " took " + "{:.6f}".format(t2 - t1) + " seconds")
return result
return measure_time
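# Illustrative usage (not part of the original module): any function can be
# wrapped to log its wall-clock time, e.g.
#
#     @timefn
#     def run_model(inputs):      # hypothetical function name
#         ...
#
# Each call then prints "therps_model_rest.py@timefn: run_model took N seconds".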
class TherpsInputs(ModelSharedInputs):
"""
Input class for Therps.
"""
def __init__(self):
"""Class representing the inputs for Therps"""
super(TherpsInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
"""
Therps constructor.
:param chem_name:
:param use:
:param formu_name:
:param percent_act_ing:
:param foliar_diss_hlife:
:param num_apps:
:param app_interval:
:param application_rate:
:param ld50_bird:
:param lc50_bird:
:param noaec_bird:
:param noael_bird:
:param species_of_the_tested_bird_avian_ld50:
:param species_of_the_tested_bird_avian_lc50:
:param species_of_the_tested_bird_avian_noaec:
:param species_of_the_tested_bird_avian_noael:
:param tw_bird_ld50:
:param tw_bird_lc50:
:param tw_bird_noaec:
:param tw_bird_noael:
:param mineau_sca_fact:
:param aw_herp_sm:
:param aw_herp_md:
:param aw_herp_slg:
:param awc_herp_sm:
:param awc_herp_md:
:param awc_herp_lg:
:param bw_frog_prey_mamm:
:param bw_frog_prey_herp:
:return:
"""
self.use = pd.Series([], dtype="object", name="use")
self.formu_name = pd.Series([], dtype="object", name="formu_name")
self.percent_act_ing = pd.Series([], dtype="float", name="percent_act_ing")
self.foliar_diss_hlife = pd.Series([], dtype="float64", name="foliar_diss_hlife")
self.num_apps = pd.Series([], dtype="int64", name="num_apps")
self.app_interval = pd.Series([], dtype="int", name="app_interval")
self.application_rate = pd.Series([], dtype="float", name="application_rate")
self.ld50_bird = pd.Series([], dtype="float", name="ld50_bird")
self.lc50_bird = pd.Series([], dtype="float", name="lc50_bird")
self.noaec_bird = pd.Series([], dtype="float", name="noaec_bird")
self.noael_bird = pd.Series([], dtype="float", name="noael_bird")
self.species_of_the_tested_bird_avian_ld50 = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_ld50")
self.species_of_the_tested_bird_avian_lc50 = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_lc50")
self.species_of_the_tested_bird_avian_noaec = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_noaec")
self.species_of_the_tested_bird_avian_noael = pd.Series([], dtype="float",
name="species_of_the_tested_bird_avian_noael")
self.tw_bird_ld50 = pd.Series([], dtype="float", name="tw_bird_ld50")
self.tw_bird_lc50 = pd.Series([], dtype="float", name="tw_bird_lc50")
self.tw_bird_noaec = pd.Series([], dtype="float", name="tw_bird_noaec")
self.tw_bird_noael = pd.Series([], dtype="float", name="tw_bird_noael")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.aw_herp_sm = pd.Series([], dtype="float", name="aw_herp_sm")
self.aw_herp_md = pd.Series([], dtype="float", name="aw_herp_md")
self.aw_herp_lg = pd.Series([], dtype="float", name="aw_herp_lg")
self.awc_herp_sm = pd.Series([], dtype="float", name="awc_herp_sm")
self.awc_herp_md = pd.Series([], dtype="float", name="awc_herp_md")
self.awc_herp_lg = pd.Series([], dtype="float", name="awc_herp_lg")
self.bw_frog_prey_mamm = pd.Series([], dtype="float", name="bw_frog_prey_mamm")
self.bw_frog_prey_herp = pd.Series([], dtype="float", name="bw_frog_prey_herp")
## application rates and days of applications
#self.app_rates = pd.Series([], dtype="object") #Series of lists, each list contains app_rates of a model simulation run
#self.day_out = pd.Series([], dtype="object") #Series of lists, each list contains day #'s of applications within a model simulaiton run
class TherpsOutputs(object):
"""
Output class for Therps.
"""
def __init__(self):
"""Class representing the outputs for Therps"""
super(TherpsOutputs, self).__init__()
## application rates and days of applications
#self.day_out = pd.Series([], dtype='object', name='day_out')
#self.app_rates = pd.Series([], dtype='object', name='app_rates')
# TODO: Add these back in after deciding how to handle the numpy arrays
# timeseries of concentrations related to herbiferous food sources
# self.out_c_ts_sg = pd.Series([], dtype='float') # short grass
# self.out_c_ts_blp = pd.Series([], dtype='float') # broad-leafed plants
# self.out_c_ts_fp = pd.Series([], dtype='float') # fruits/pods
#
# self.out_c_ts_mean_sg = pd.Series([], dtype='float') # short grass
# self.out_c_ts_mean_blp = pd.Series([], dtype='float') # broad-leafed plants
# self.out_c_ts_mean_fp = pd.Series([], dtype='float') # fruits/pods
# Table 5
self.out_ld50_ad_sm = pd.Series([], dtype='float', name="out_ld50_ad_sm")
self.out_ld50_ad_md = pd.Series([], dtype='float', name="out_ld50_ad_md")
self.out_ld50_ad_lg = pd.Series([], dtype='float', name="out_ld50_ad_lg")
self.out_eec_dose_bp_sm = pd.Series([], dtype='float', name="out_eec_dose_bp_sm")
self.out_eec_dose_bp_md = pd.Series([], dtype='float', name="out_eec_dose_bp_md")
self.out_eec_dose_bp_lg = pd.Series([], dtype='float', name="out_eec_dose_bp_lg")
self.out_arq_dose_bp_sm = pd.Series([], dtype='float', name="out_arq_dose_bp_sm")
self.out_arq_dose_bp_md = pd.Series([], dtype='float', name="out_arq_dose_bp_md")
self.out_arq_dose_bp_lg = pd.Series([], dtype='float', name="out_arq_dose_bp_lg")
self.out_eec_dose_fr_sm = pd.Series([], dtype='float', name="out_eec_dose_fr_sm")
self.out_eec_dose_fr_md = pd.Series([], dtype='float', name="out_eec_dose_fr_md")
self.out_eec_dose_fr_lg = pd.Series([], dtype='float', name="out_eec_dose_fr_lg")
self.out_arq_dose_fr_sm = pd.Series([], dtype='float', name="out_arq_dose_fr_sm")
self.out_arq_dose_fr_md = pd.Series([], dtype='float', name="out_arq_dose_fr_md")
self.out_arq_dose_fr_lg = pd.Series([], dtype='float', name="out_arq_dose_fr_lg")
self.out_eec_dose_hm_md = pd.Series([], dtype='float', name="out_eec_dose_hm_md")
self.out_eec_dose_hm_lg = pd.Series([], dtype='float', name="out_eec_dose_hm_lg")
self.out_arq_dose_hm_md = pd.Series([], dtype='float', name="out_arq_dose_hm_md")
self.out_arq_dose_hm_lg = pd.Series([], dtype='float', name="out_arq_dose_hm_lg")
self.out_eec_dose_im_md = pd.Series([], dtype='float', name="out_eec_dose_im_md")
self.out_eec_dose_im_lg = pd.Series([], dtype='float', name="out_eec_dose_im_lg")
self.out_arq_dose_im_md = pd.Series([], dtype='float', name="out_arq_dose_im_md")
self.out_arq_dose_im_lg = pd.Series([], dtype='float', name="out_arq_dose_im_lg")
self.out_eec_dose_tp_md = pd.Series([], dtype='float', name="out_eec_dose_tp_md")
self.out_eec_dose_tp_lg = pd.Series([], dtype='float', name="out_eec_dose_tp_lg")
self.out_arq_dose_tp_md = pd.Series([], dtype='float', name="out_arq_dose_tp_md")
self.out_arq_dose_tp_lg = pd.Series([], dtype='float', name="out_arq_dose_tp_lg")
# Table 6
self.out_eec_diet_herp_bl = pd.Series([], dtype='float', name="out_eec_diet_herp_bl")
self.out_eec_arq_herp_bl = pd.Series([], dtype='float', name="out_eec_arq_herp_bl")
self.out_eec_diet_herp_fr = pd.Series([], dtype='float', name="out_eec_diet_herp_fr")
self.out_eec_arq_herp_fr = pd.Series([], dtype='float', name="out_eec_arq_herp_fr")
self.out_eec_diet_herp_hm = pd.Series([], dtype='float', name="out_eec_diet_herp_hm")
self.out_eec_arq_herp_hm = pd.Series([], dtype='float', name="out_eec_arq_herp_hm")
self.out_eec_diet_herp_im = pd.Series([], dtype='float', name="out_eec_diet_herp_im")
self.out_eec_arq_herp_im = pd.Series([], dtype='float', name="out_eec_arq_herp_im")
self.out_eec_diet_herp_tp = pd.Series([], dtype='float', name="out_eec_diet_herp_tp")
self.out_eec_arq_herp_tp = pd.Series([], dtype='float', name="out_eec_arq_herp_tp")
# Table 7
self.out_eec_diet_herp_bl = pd.Series([], dtype='float', name="out_eec_diet_herp_bl")
self.out_eec_crq_herp_bl = pd.Series([], dtype='float', name="out_eec_crq_herp_bl")
self.out_eec_diet_herp_fr = pd.Series([], dtype='float', name="out_eec_diet_herp_fr")
self.out_eec_crq_herp_fr = pd.Series([], dtype='float', name="out_eec_crq_herp_fr")
self.out_eec_diet_herp_hm = pd.Series([], dtype='float', name="out_eec_diet_herp_hm")
self.out_eec_crq_herp_hm = pd.Series([], dtype='float', name="out_eec_crq_herp_hm")
self.out_eec_diet_herp_im = pd.Series([], dtype='float', name="out_eec_diet_herp_im")
self.out_eec_crq_herp_im = pd.Series([], dtype='float', name="out_eec_crq_herp_im")
self.out_eec_diet_herp_tp = pd.Series([], dtype='float', name="out_eec_diet_herp_tp")
self.out_eec_crq_herp_tp = pd.Series([], dtype='float', name="out_eec_crq_herp_tp")
# Table 8
self.out_eec_dose_bp_sm_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_sm_mean")
self.out_eec_dose_bp_md_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_md_mean")
self.out_eec_dose_bp_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_bp_lg_mean")
self.out_arq_dose_bp_sm_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_sm_mean")
self.out_arq_dose_bp_md_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_md_mean")
self.out_arq_dose_bp_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_bp_lg_mean")
self.out_eec_dose_fr_sm_mean = pd.Series([], dtype='float', name="out_eec_dose_fr_sm_mean")
self.out_eec_dose_fr_md_mean = pd.Series([], dtype='float', name="out_eec_dose_fr_md_mean")
self.out_eec_dose_fr_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_fr_lg_mean")
self.out_arq_dose_fr_sm_mean = pd.Series([], dtype='float', name="out_arq_dose_fr_sm_mean")
self.out_arq_dose_fr_md_mean = pd.Series([], dtype='float', name="out_arq_dose_fr_md_mean")
self.out_arq_dose_fr_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_fr_lg_mean")
self.out_eec_dose_hm_md_mean = pd.Series([], dtype='float', name="out_eec_dose_hm_md_mean")
self.out_eec_dose_hm_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_hm_lg_mean")
self.out_arq_dose_hm_md_mean = pd.Series([], dtype='float', name="out_arq_dose_hm_md_mean")
self.out_arq_dose_hm_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_hm_lg_mean")
self.out_eec_dose_im_md_mean = pd.Series([], dtype='float', name="out_eec_dose_im_md_mean")
self.out_eec_dose_im_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_im_lg_mean")
self.out_arq_dose_im_md_mean = pd.Series([], dtype='float', name="out_arq_dose_im_md_mean")
self.out_arq_dose_im_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_im_lg_mean")
self.out_eec_dose_tp_md_mean = pd.Series([], dtype='float', name="out_eec_dose_tp_md_mean")
self.out_eec_dose_tp_lg_mean = pd.Series([], dtype='float', name="out_eec_dose_tp_lg_mean")
self.out_arq_dose_tp_md_mean = pd.Series([], dtype='float', name="out_arq_dose_tp_md_mean")
self.out_arq_dose_tp_lg_mean = pd.Series([], dtype='float', name="out_arq_dose_tp_lg_mean")
# Table 9
self.out_eec_diet_herp_bl_mean = pd.Series([], dtype='float', name="out_eec_diet_herp_bl_mean")
self.out_eec_arq_herp_bl_mean = pd.Series([], dtype='float', name="out_eec_arq_herp_bl_mean")
self.out_eec_diet_herp_fr_mean = pd.Series([], dtype='float', name="out_eec_diet_herp_fr_mean")
self.out_eec_arq_herp_fr_mean = pd.Series([], dtype='float', name="out_eec_arq_herp_fr_mean")
self.out_eec_diet_herp_hm_mean = pd.Series([], dtype='float', name="out_eec_diet_herp_hm_mean")
self.out_eec_arq_herp_hm_mean = | pd.Series([], dtype='float', name="out_eec_arq_herp_hm_mean") | pandas.Series |
# -*- coding: utf-8 -*-
"""
@author: <EMAIL>
@site: e-smartdata.org
"""
import numpy as np
import pandas as pd
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
df3 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
s = pd.Series(np.random.rand(10), name='x')
# %% concat
df = pd.concat([df1, df2, df3], ignore_index=True)
# %%
df = pd.concat([df1, df2, df3])
df.reset_index()
# %%
df1 = pd.DataFrame(np.random.rand(10, 4), columns=list('abcd'))
df2 = pd.DataFrame(np.random.rand(10, 4), columns=list('efgh'))
df = pd.concat([df1, df2])
df = pd.concat([df1, df2], axis=1)
# %%
df1 = df1[::2]
df = pd.concat([df1, df2], axis=1, join='outer')
df = pd.concat([df1, df2], axis=1, join='inner')
# %%
# append Series to DataFrame
| pd.concat([df1, s]) | pandas.concat |
# -*- coding: UTF-8 -*-
# import matplotlib as mpl
# mpl.use('Agg')
import time
import datetime
from sqlalchemy import create_engine
from configparser import ConfigParser
import pandas as pd
import matplotlib.pyplot as plt
import tushare as ts
import math
import sys
reload(sys)  # Python 2.5+ deletes sys.setdefaultencoding after initialization, so reload sys to restore it
sys.setdefaultencoding('utf-8')
cf = ConfigParser()
cf.read('./gpst.conf')
dbHost = cf.get("db", "dbHost")
dbPort = cf.get("db", "dbPort")
dbUser = cf.get("db", "dbUser")
dbPass = cf.get("db", "dbPass")
dbName = cf.get("db", "dbName")
engine = create_engine(
"mysql://" + dbUser + ":" + dbPass + "@" + dbHost + ":" + dbPort + "/" + dbName + "?charset=utf8")
conn = engine.connect()
# Get the date n days ago
def getNdatAgo(date, n):
t = time.strptime(date, "%Y-%m-%d")
y, m, d = t[0:3]
Date = str(datetime.datetime(y, m, d) - datetime.timedelta(n)).split()
return Date[0]
'''
Given num, return 10 to the power of the number of digits in its integer part
'''
def getLength(num):
numStr = str(num)
place = numStr.find('.')
if (-1 == place) :
length = len(numStr)
else:
tempStr = numStr[0 : place]
length = len(tempStr)
return pow(10, length)
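# Illustrative check (not part of the original script): the helper returns 10
# raised to the number of digits in the integer part, e.g. getLength(50) -> 100
# and getLength(123.45) -> 1000.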
def draw(code):
# Fetch the data
# df = ts.get_k_data(code, start="2017-09-01")
tDate = time.strftime("%Y-%m-%d", time.localtime())
nDate = getNdatAgo(tDate, 365)
sql = "SELECT * FROM finance.tick_data WHERE code = '" + code + "' AND `date` > '" + nDate + "'"
df = | pd.read_sql(sql, con=engine) | pandas.read_sql |
# -*- coding: utf-8 -*-
import json
from datetime import datetime
import pandas as pd
import numpy as np
from sqlalchemy import func
from findy import findy_config
from findy.interface import Region, Provider
from findy.database.schema.fundamental.dividend_financing import SpoDetail, DividendFinancing
from findy.database.plugins.eastmoney.common import EastmoneyPageabeDataRecorder
from findy.database.context import get_db_session
from findy.utils.time import now_pd_timestamp
from findy.utils.convert import to_float
from findy.utils.kafka import connect_kafka_producer, publish_message
from findy.utils.progress import progress_topic, progress_key
class SPODetailRecorder(EastmoneyPageabeDataRecorder):
region = Region.CHN
provider = Provider.EastMoney
data_schema = SpoDetail
url = 'https://emh5.eastmoney.com/api/FenHongRongZi/GetZengFaMingXiList'
page_url = url
path_fields = ['ZengFaMingXiList']
def get_original_time_field(self):
return 'ZengFaShiJian'
def format(self, entity, df):
df['spo_issues'] = df['ShiJiZengFa'].apply(lambda x: to_float(x))
df['spo_price'] = df['ZengFaJiaGe'].apply(lambda x: to_float(x))
df['spo_raising_fund'] = df['ShiJiMuJi'].apply(lambda x: to_float(x))
df.update(df.select_dtypes(include=[np.number]).fillna(0))
if 'timestamp' not in df.columns:
df['timestamp'] = pd.to_datetime(df[self.get_original_time_field()])
elif not isinstance(df['timestamp'].dtypes, datetime):
df['timestamp'] = | pd.to_datetime(df['timestamp']) | pandas.to_datetime |
#coding=utf-8
import pandas as pd
import time
import datetime
import matplotlib.pyplot as plt
import xlrd
import numpy as np
from matplotlib.dates import DayLocator, HourLocator, DateFormatter
from luminol.anomaly_detector import AnomalyDetector
import matplotlib.dates as dates
def timestamp_to_datetime(x):
'''
:param x: timestamp data
:return: YYYY-mm-dd HH:MM:SS
'''
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(x))
def df_to_series(data_df):
series_tmp = data_df.iloc[:, 1]
series_tmp.index = data_df.iloc[:, 0].apply(lambda x: datetime.datetime.strptime(x, FORMAT1))
return series_tmp
def series_to_df(data_series):
data_frame= pd.DataFrame(list(zip(data_series.index, data_series.values)))
data_frame.columns = ['kpi_time', 'kpi_value']
return data_frame
def series_to_csv(write_path, data_series):
df = series_to_df(data_series)
df.to_csv(write_path, index=False, header=False)
def score_to_df(score_data):
temp = []
for timestamp, value in score_data.iteritems():
temp.append([timestamp_to_datetime(timestamp/1000), value])
temp_df = | pd.DataFrame(temp, columns=['kpi_time', 'kpi_value']) | pandas.DataFrame |
import sys
import pandas as pd
DAILY_LTLA_FILE = "ltla_daily_cases.csv"
SGTF_FILE = "ltla_sgtf.xlsx"
GEOCODE_LOOKUP_FILE = (
"Local_Authority_Districts_(December_2017)_Boundaries_in_Great_Britain.csv"
)
OUTPUT = "uk-ltla.csv"
MERGE_ERROR_MSG = """
Error: Merge happened incorrectly
The newCasesBySpecimenDate column is empty.
One of the reasons merge could have resulted in an empty column
is an incorrect parameter in resample(); try setting
the weekly resampling to a different day of the week."""
def area_weekly_cases(group):
area_name, area_df = group
df = area_df["newCasesBySpecimenDate"].resample("W-MON").agg("sum").reset_index()
df["areaName"] = area_name
return df
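# Illustrative sketch (not part of the original script): groupby() yields the
# (name, frame) tuples this helper expects, and the frame's index must already
# be a DatetimeIndex for the "W-MON" weekly resample to work. Data is made up.
#
#     cases = pd.DataFrame({
#         "date": pd.date_range("2021-01-01", periods=14, freq="D"),
#         "areaName": "Barnet",
#         "newCasesBySpecimenDate": range(14),
#     }).set_index("date")
#     weekly = pd.concat(area_weekly_cases(g) for g in cases.groupby("areaName"))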
def get_weekly_ltla_cases(filename):
df = pd.read_csv(filename)
df["date"] = | pd.to_datetime(df["date"]) | pandas.to_datetime |
# Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from functools import cached_property, lru_cache
from logging import getLogger
from typing import Callable, Dict, List, Optional, Sequence, Union
import numpy as np
import pandas as pd
from .ros2_tracing.data_model import Ros2DataModel
from .value_objects import (CallbackGroupValueLttng, NodeValueLttng,
PublisherValueLttng,
SubscriptionCallbackValueLttng,
TimerCallbackValueLttng)
from ...common import Util
from ...exceptions import InvalidArgumentError
from ...value_objects import ExecutorValue, NodeValue, Qos
logger = getLogger(__name__)
class LttngInfo:
def __init__(self, data: Ros2DataModel):
self._formatted = DataFrameFormatted(data)
# TODO(hsgwa): check rmw_impl for each process.
self._rmw_implementation = data.rmw_impl.iloc[0, 0] if len(data.rmw_impl) > 0 else ''
# self._source = records_source
# self._binder_cache: Dict[str, PublisherBinder] = {}
self._timer_cb_cache: Dict[str, Sequence[TimerCallbackValueLttng]] = {}
self._sub_cb_cache: Dict[str, List[SubscriptionCallbackValueLttng]] = {}
self._pub_cache: Dict[str, List[PublisherValueLttng]] = {}
self._cbg_cache: Dict[str, List[CallbackGroupValueLttng]] = {}
self._id_to_topic: Dict[str, str] = {}
self._sub_cb_cache_without_pub: Optional[Dict[str, List[SubscriptionCallbackValueLttng]]]
self._sub_cb_cache_without_pub = None
self._timer_cb_cache_without_pub: Optional[Dict[str, List[TimerCallbackValueLttng]]]
self._timer_cb_cache_without_pub = None
def _get_timer_cbs_without_pub(self, node_id: str) -> List[TimerCallbackValueLttng]:
if self._timer_cb_cache_without_pub is None:
self._timer_cb_cache_without_pub = self._load_timer_cbs_without_pub()
if node_id not in self._timer_cb_cache_without_pub:
return []
return self._timer_cb_cache_without_pub[node_id]
def _get_sub_cbs_without_pub(self, node_id: str) -> List[SubscriptionCallbackValueLttng]:
if self._sub_cb_cache_without_pub is None:
self._sub_cb_cache_without_pub = self._load_sub_cbs_without_pub()
if node_id not in self._sub_cb_cache_without_pub:
return []
return self._sub_cb_cache_without_pub[node_id]
def get_rmw_impl(self) -> str:
"""
Get rmw implementation.
Returns
-------
str
rmw_implementation
"""
return self._rmw_implementation
def _load_timer_cbs_without_pub(self) -> Dict[str, List[TimerCallbackValueLttng]]:
timer_cbs_info: Dict[str, List[TimerCallbackValueLttng]] = {}
for node in self.get_nodes():
timer_cbs_info[node.node_id] = []
timer_df = self._formatted.timer_callbacks_df
timer_df = merge(timer_df, self._formatted.nodes_df, 'node_handle')
for _, row in timer_df.iterrows():
node_name = row['node_name']
node_id = row['node_id']
timer_cbs_info[node_id].append(
TimerCallbackValueLttng(
callback_id=row['callback_id'],
node_name=node_name,
node_id=row['node_id'],
symbol=row['symbol'],
period_ns=row['period_ns'],
timer_handle=row['timer_handle'],
publish_topic_names=None,
callback_object=row['callback_object']
)
)
return timer_cbs_info
def _get_timer_callbacks(self, node: NodeValue) -> Sequence[TimerCallbackValueLttng]:
node_id = node.node_id
assert node_id is not None
timer_cbs = self._get_timer_cbs_without_pub(node_id)
# if node_id not in self._binder_cache.keys():
# self._binder_cache[node_id] = PublisherBinder(self, self._source)
# binder = self._binder_cache[node_id]
# if binder.can_bind(node) and len(timer_cbs) > 0:
# timer_cbs = binder.bind_pub_topics_and_timer_cbs(node_id, timer_cbs)
return timer_cbs
def get_timer_callbacks(self, node: NodeValue) -> Sequence[TimerCallbackValueLttng]:
"""
Get timer callbacks information.
Parameters
----------
node : NodeValue
target node.
Returns
-------
Sequence[TimerCallbackValueLttng]
"""
def get_timer_cb_local(node: NodeValueLttng):
node_id = node.node_id
if node.node_id not in self._timer_cb_cache.keys():
self._timer_cb_cache[node_id] = self._get_timer_callbacks(node)
return self._timer_cb_cache[node_id]
if node.node_id is None:
return Util.flatten([
get_timer_cb_local(node)
for node
in self._get_nodes(node.node_name)
])
node_lttng = NodeValueLttng(node.node_name, node.node_id)
return get_timer_cb_local(node_lttng)
@lru_cache
def get_nodes(self) -> Sequence[NodeValueLttng]:
"""
Get the list of nodes.
Returns
-------
Sequence[NodeValueLttng]
nodes.
"""
nodes_df = self._formatted.nodes_df
nodes = []
added_nodes = set()
duplicate_nodes = set()
for _, row in nodes_df.iterrows():
node_name = row['node_name']
node_id = row['node_id']
if node_name in added_nodes:
duplicate_nodes.add(node_name)
added_nodes.add(node_name)
nodes.append(NodeValueLttng(node_name, node_id))
for duplicate_node in duplicate_nodes:
logger.warning(
f'Duplicate node. node_name = {duplicate_node}. '
'The measurement results may be incorrect.')
return nodes
def _load_sub_cbs_without_pub(
self
) -> Dict[str, List[SubscriptionCallbackValueLttng]]:
sub_cbs_info: Dict[str, List[SubscriptionCallbackValueLttng]] = {}
for node in self.get_nodes():
sub_cbs_info[node.node_id] = []
sub_df = self._formatted.subscription_callbacks_df
sub_df = merge(sub_df, self._formatted.nodes_df, 'node_handle')
tilde_sub = self._formatted.tilde_subscriptions_df
sub_df = pd.merge(sub_df, tilde_sub, on=['node_name', 'topic_name'], how='left')
sub_df = sub_df.astype({'tilde_subscription': 'Int64'})
for _, row in sub_df.iterrows():
node_name = row['node_name']
node_id = row['node_id']
tilde_subscription = row['tilde_subscription']
if tilde_subscription is pd.NA:
tilde_subscription = None
# Since callback_object_intra contains nan, it is of type np.float.
record_callback_object_intra = row['callback_object_intra']
if record_callback_object_intra is pd.NA:
callback_object_intra = None
else:
callback_object_intra = int(record_callback_object_intra)
self._id_to_topic[row['callback_id']] = row['topic_name']
sub_cbs_info[node_id].append(
SubscriptionCallbackValueLttng(
callback_id=row['callback_id'],
node_id=node_id,
node_name=node_name,
symbol=row['symbol'],
subscribe_topic_name=row['topic_name'],
publish_topic_names=None,
subscription_handle=row['subscription_handle'],
callback_object=row['callback_object'],
callback_object_intra=callback_object_intra,
tilde_subscription=tilde_subscription
)
)
return sub_cbs_info
def _get_subscription_callback_values(
self,
node: NodeValue
) -> List[SubscriptionCallbackValueLttng]:
node_id = node.node_id
assert node_id is not None
sub_cbs_info: List[SubscriptionCallbackValueLttng]
sub_cbs_info = self._get_sub_cbs_without_pub(node_id)
# if node_id not in self._binder_cache.keys():
# self._binder_cache[node_id] = PublisherBinder(self, self._source)
# binder = self._binder_cache[node_id]
# if binder.can_bind(node):
# sub_cbs_info = binder.bind_pub_topics_and_sub_cbs(node_id, sub_cbs_info)
return sub_cbs_info
def get_subscription_callbacks(
self,
node: NodeValue
) -> Sequence[SubscriptionCallbackValueLttng]:
"""
Get subscription callbacks information.
Parameters
----------
node : NodeValue
target node.
Returns
-------
Sequence[SubscriptionCallbackValueLttng]
"""
def get_sub_cb_local(node: NodeValueLttng):
node_id = node.node_id
if node_id not in self._sub_cb_cache.keys():
self._sub_cb_cache[node_id] = self._get_subscription_callback_values(node)
return self._sub_cb_cache[node_id]
if node.node_id is None:
return Util.flatten([
get_sub_cb_local(node)
for node
in self._get_nodes(node.node_name)
])
node_lttng = NodeValueLttng(node.node_name, node.node_id)
return get_sub_cb_local(node_lttng)
@property
def tilde_sub_id_map(self) -> Dict[int, int]:
return self._formatted.tilde_sub_id_map
def _get_publishers(self, node: NodeValueLttng) -> List[PublisherValueLttng]:
node_id = node.node_id
# if node_id not in self._binder_cache.keys():
# self._binder_cache[node_id] = PublisherBinder(self, self._source)
# binder = self._binder_cache[node_id]
# if not binder.can_bind(node):
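        # The PublisherBinder path is currently disabled (see the commented-out
        # code above), so publishers are returned without callback binding and
        # the binding code below this return is not reached.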
return self.get_publishers_without_cb_bind(node_id)
cbs: List[Union[TimerCallbackValueLttng,
SubscriptionCallbackValueLttng]] = []
cbs += self.get_timer_callbacks(node)
cbs += self.get_subscription_callbacks(node)
pubs_info = self.get_publishers_without_cb_bind(node_id)
for i, pub_info in enumerate(pubs_info):
topic_name = pub_info.topic_name
cbs_pubs = Util.filter_items(
lambda x: topic_name in x.publish_topic_names, cbs)
cb_ids = tuple(c.callback_id for c in cbs_pubs)
pubs_info[i] = PublisherValueLttng(
node_name=pub_info.node_name,
node_id=pub_info.node_id,
topic_name=pub_info.topic_name,
callback_ids=cb_ids,
publisher_handle=pub_info.publisher_handle
)
return pubs_info
def get_publishers(self, node: NodeValue) -> List[PublisherValueLttng]:
"""
Get publishers information.
Parameters
----------
node: NodeValue
target node.
Returns
-------
        List[PublisherValueLttng]
"""
def get_publishers_local(node: NodeValueLttng):
node_id = node.node_id
if node_id not in self._pub_cache.keys():
self._pub_cache[node_id] = self._get_publishers(node)
return self._pub_cache[node_id]
if node.node_id is None:
return Util.flatten([
get_publishers_local(node)
for node
in self._get_nodes(node.node_name)
])
node_lttng = NodeValueLttng(node.node_name, node.node_id)
return get_publishers_local(node_lttng)
def _get_nodes(
self,
node_name: str
) -> Sequence[NodeValueLttng]:
return Util.filter_items(lambda x: x.node_name == node_name, self.get_nodes())
def get_publishers_without_cb_bind(self, node_id: str) -> List[PublisherValueLttng]:
"""
        Get publishers information without binding publishers to callbacks.
        Parameters
        ----------
        node_id : str
            target node id.
        Returns
        -------
        List[PublisherValueLttng]
"""
pub_df = self._formatted.publishers_df
pub_df = merge(pub_df, self._formatted.nodes_df, 'node_handle')
tilde_pub = self._formatted.tilde_publishers_df
pub_df = pd.merge(pub_df, tilde_pub, on=['node_name', 'topic_name'], how='left')
pub_df = pub_df.astype({'tilde_publisher': 'Int64'})
pubs_info = []
for _, row in pub_df.iterrows():
if row['node_id'] != node_id:
continue
tilde_publisher = row['tilde_publisher']
if tilde_publisher is pd.NA:
tilde_publisher = None
pubs_info.append(
PublisherValueLttng(
node_name=row['node_name'],
topic_name=row['topic_name'],
node_id=row['node_id'],
callback_ids=None,
publisher_handle=row['publisher_handle'],
tilde_publisher=tilde_publisher
)
)
return pubs_info
def _is_user_made_callback(
self,
callback_id: str
) -> bool:
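        # Only subscription callbacks are registered in _id_to_topic; any other
        # callback is treated as user-made, and subscription callbacks are
        # user-made unless they listen to /clock or /parameter_events.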
is_subscription = callback_id in self._id_to_topic.keys()
if not is_subscription:
return True
topic_name = self._id_to_topic[callback_id]
return topic_name not in ['/clock', '/parameter_events']
def _get_callback_groups(
self,
node_id: str
) -> List[CallbackGroupValueLttng]:
concate_target_dfs = []
concate_target_dfs.append(self._formatted.timer_callbacks_df)
concate_target_dfs.append(self._formatted.subscription_callbacks_df)
try:
column_names = [
'callback_group_addr', 'callback_id', 'node_handle'
]
concat_df = concat(column_names, concate_target_dfs)
concat_df = merge(
concat_df, self._formatted.nodes_df, 'node_handle')
concat_df = merge(
concat_df, self._formatted.callback_groups_df, 'callback_group_addr')
cbgs = []
for _, group_df in concat_df.groupby(['callback_group_addr']):
row = group_df.iloc[0, :]
node_id_ = row['node_id']
if node_id != node_id_:
continue
callback_ids = tuple(group_df['callback_id'].values)
callback_ids = tuple(Util.filter_items(self._is_user_made_callback, callback_ids))
cbgs.append(
CallbackGroupValueLttng(
callback_group_type_name=row['group_type_name'],
node_name=row['node_name'],
node_id=node_id,
callback_ids=callback_ids,
callback_group_id=row['callback_group_id'],
callback_group_addr=row['callback_group_addr'],
executor_addr=row['executor_addr'],
)
)
return cbgs
except KeyError:
return []
def get_callback_groups(
self,
node: NodeValue
) -> Sequence[CallbackGroupValueLttng]:
"""
        Get callback group values.
        Parameters
        ----------
        node : NodeValue
            target node.
        Returns
        -------
        Sequence[CallbackGroupValueLttng]
"""
def get_cbg_local(node: NodeValueLttng):
node_id = node.node_id
if node_id not in self._cbg_cache:
self._cbg_cache[node_id] = self._get_callback_groups(node.node_id)
return self._cbg_cache[node_id]
if node.node_id is None:
return Util.flatten([
get_cbg_local(node)
for node
in self._get_nodes(node.node_name)
])
node_lttng = NodeValueLttng(node.node_name, node.node_id)
return get_cbg_local(node_lttng)
def get_executors(self) -> List[ExecutorValue]:
"""
Get executors information.
Returns
-------
        List[ExecutorValue]
"""
exec_df = self._formatted.executor_df
cbg_df = self._formatted.callback_groups_df
exec_df = merge(exec_df, cbg_df, 'executor_addr')
execs = []
for _, group in exec_df.groupby('executor_addr'):
row = group.iloc[0, :]
executor_type_name = row['executor_type_name']
cbg_ids = group['callback_group_id'].values
execs.append(
ExecutorValue(
executor_type_name,
tuple(cbg_ids))
)
return execs
def get_publisher_qos(self, publisher: PublisherValueLttng) -> Qos:
df = self._formatted.publishers_df
pub_df = df[df['publisher_handle'] == publisher.publisher_handle]
if len(pub_df) == 0:
raise InvalidArgumentError('No publisher matching the criteria was found.')
if len(pub_df) > 1:
logger.warning(
                'Multiple publishers matching your criteria were found. '
                'The value of the first publisher qos will be returned.')
depth = int(pub_df['depth'].values[0])
return Qos(depth)
def get_subscription_qos(self, callback: SubscriptionCallbackValueLttng) -> Qos:
df = self._formatted.subscription_callbacks_df
sub_df = df[df['callback_object'] == callback.callback_object]
if len(sub_df) == 0:
raise InvalidArgumentError('No subscription matching the criteria was found.')
if len(sub_df) > 1:
logger.warning(
                'Multiple subscriptions matching your criteria were found. '
                'The value of the first subscription qos will be returned.')
depth = int(sub_df['depth'].values[0])
return Qos(depth)
# class PublisherBinder:
# TARGET_RECORD_MAX_INDEX = 10
# def __init__(self, lttng_info: LttngInfo, records_source: RecordsSource) -> None:
# self._info = lttng_info
# self._source = records_source
# self._callback_records_cache: Optional[RecordsInterface] = None
# self._intra_comm_records_cache: Optional[RecordsInterface] = None
# self._inter_comm_records_cache: Optional[RecordsInterface] = None
# def can_bind(self, node: NodeValue) -> bool:
# """
# If all callbacks in a node are exclusive, the publisher can be tied to the callback.
# Parameters
# ----------
# node_name : str
# [description]
# Returns
# -------
# bool
# """
# # implementation is mostly done, but the processing time is huge, so it is not practical.
# # Disable it until the speedup is complete.
# return False
# cbgs: Sequence[CallbackGroupValueLttng]
# cbgs = self._info.get_callback_groups(node)
# # TODO: ignore /parameter_events, /clock
# # Ignore callback groups that have no callbacks added,
# # as they are irrelevant to performance.
# # if len(callback_ids) == 0:
# # self._ignored_callback_groups.add(row['callback_group_id'])
# # continue
# if len(cbgs) != 1:
# print('false')
# return False
# cbg = cbgs[0]
# if cbg.callback_group_type is CallbackGroupType.REENTRANT:
# print('false')
# return False
# print('true')
# return True
# def bind_pub_topics_and_timer_cbs(
# self,
# node_name: str,
# callbacks: Sequence[TimerCallbackValueLttng],
# ) -> List[TimerCallbackValueLttng]:
# """
# Return publisher binded callback values.
# Note:
# This function call takes a long time because binding uses records.
# Parameters
# ----------
# node_name : str
# callbacks_info : Sequence[TimerCallbackValueLttng]
# Returns
# -------
# List[TimerCallbackValueLttng]
# publisher binded callback values.
# """
# callback_list: List[TimerCallbackValueLttng]
# callback_list = list(callbacks)
# # insert empty tuple
# for cb in callbacks:
# self._update_timer_cb_publish_topics(callback_list, cb, ())
# publishers = self._info.get_publishers_without_cb_bind(node_name)
# publishers = Util.filter_items(
# lambda x: x.topic_name not in ['/parameter_events', '/rosout'],
# publishers
# )
# from itertools import product
# from tqdm import tqdm
# it = list(product(publishers, callbacks))
# for publisher, cb in tqdm(it):
# if not self._is_consistent(publisher, cb):
# continue
# topic_names: Tuple[str, ...] = (publisher.topic_name,)
# if cb.publish_topic_names is not None:
# topic_names = topic_names + cb.publish_topic_names
# self._update_timer_cb_publish_topics(
# callback_list, cb, topic_names)
# break
# return callback_list
# def bind_pub_topics_and_sub_cbs(
# self,
# node_name: str,
# callbacks_info: Sequence[SubscriptionCallbackValueLttng],
# ) -> List[SubscriptionCallbackValueLttng]:
# """
# Return publisher binded callback values.
# Note:
# This function call takes a long time because binding uses records.
# Parameters
# ----------
# node_name : str
# callbacks_info : Sequence[SubscriptionCallbackValueLttng]
# Returns
# -------
# List[SubscriptionCallbackValueLttng]
# publisher binded callback values.
# """
# system_topics = ['/parameter_events', '/rosout', '/clock']
# callback_list: List[SubscriptionCallbackValueLttng]
# callback_list = list(callbacks_info)
# callback_list = Util.filter_items(
# lambda x: x.subscribe_topic_name not in system_topics, callback_list)
# # insert empty tuple
# for cb_info in callback_list:
# self._update_sub_cb_publish_topics(callback_list, cb_info, ())
# publishers = self._info.get_publishers_without_cb_bind(node_name)
# publishers = Util.filter_items(lambda x: x.topic_name not in system_topics, publishers)
# from itertools import product
# from tqdm import tqdm
# it = list(product(publishers, callback_list))
# for publisher, cb_info in tqdm(it):
# if not self._is_consistent(publisher, cb_info):
# continue
# topic_names: Tuple[str, ...] = (publisher.topic_name,)
# if cb_info.publish_topic_names is not None:
# topic_names = topic_names + cb_info.publish_topic_names
# self._update_sub_cb_publish_topics(
# callback_list, cb_info, topic_names)
# break
# return callback_list
# def _get_publish_time(
# self,
# publisher_info: PublisherValueLttng
# ) -> Optional[int]:
# def select_record_index(records: RecordsInterface) -> int:
# # Select publish after the initialization is complete.
# # To reduce the search time from the beginning. The smaller the index, the better.
# # Note: intra_porocess cyclic demo is manually publishing the first message.
# return min(len(publisher_records.data)-1, self.TARGET_RECORD_MAX_INDEX)
# publisher_handle = publisher_info.publisher_handle
# publisher_records = self._source.inter_proc_comm_records.clone()
# publisher_records.filter_if(lambda x: x.get(
# 'publisher_handle') == publisher_handle)
# if len(publisher_records) > 0:
# publish_index = select_record_index(publisher_records)
# return publisher_records.data[publish_index].get('rclcpp_publish_timestamp')
# publisher_records = self._source.intra_proc_comm_records.clone()
# publisher_records.filter_if(lambda x: x.get(
# 'publisher_handle') == publisher_handle)
# if len(publisher_records) > 0:
# publish_index = select_record_index(publisher_records)
# return publisher_records.data[publish_index].get('rclcpp_intra_publish_timestamp')
# return None
# def _is_consistent_inter(
# self,
# publish_time: int,
# callback_info: Union[TimerCallbackValueLttng,
# SubscriptionCallbackValueLttng]
# ) -> Optional[bool]:
# callback_object = callback_info.callback_object
# cb_records = self._source.callback_records.clone()
# cb_records.filter_if(lambda x: x.get(
# 'callback_object') == callback_object)
# for data in cb_records.data:
# if 'callback_start_timestamp' not in data.columns:
# continue
# if 'callback_end_timestamp' not in data.columns:
# continue
# if data.get('callback_start_timestamp') < publish_time and \
# publish_time < data.get('callback_end_timestamp'):
# return True
# if data.get('callback_start_timestamp') > publish_time and \
# data.get('callback_end_timestamp') > publish_time:
# return False
# return None
# def _is_consistent_intra(
# self,
# publish_time: int,
# callback: SubscriptionCallbackValueLttng
# ) -> bool:
# callback_object = callback.callback_object_intra
# cb_records = self._source.callback_records.clone()
# cb_records.filter_if(lambda x: x.get(
# 'callback_object') == callback_object)
# for data in cb_records.data:
# if 'callback_start_timestamp' not in data.columns:
# continue
# if 'callback_end_timestamp' not in data.columns:
# continue
# if data.get('callback_start_timestamp') < publish_time and \
# publish_time < data.get('callback_end_timestamp'):
# return True
# if data.get('callback_start_timestamp') > publish_time and \
# data.get('callback_end_timestamp') > publish_time:
# return False
# return False
# def _is_consistent(
# self,
# publisher_info: PublisherValueLttng,
# callback_info: Union[TimerCallbackValueLttng,
# SubscriptionCallbackValueLttng]
# ) -> bool:
# publish_time = self._get_publish_time(publisher_info)
# if publish_time is None:
# return False
# is_consistent = self._is_consistent_inter(publish_time, callback_info)
# if is_consistent is True:
# return True
# if isinstance(callback_info, SubscriptionCallbackValueLttng):
# return self._is_consistent_intra(publish_time, callback_info)
# return False
# @staticmethod
# def _update_timer_cb_publish_topics(
# timer_cbs: List[TimerCallbackValueLttng],
# update_target: TimerCallbackValueLttng,
# publish_topic_names: Tuple[str, ...]
# ) -> None:
# # try:
# index = timer_cbs.index(update_target)
# timer_cbs[index] = TimerCallbackValueLttng(
# callback_id=update_target.callback_id,
# node_id=update_target.node_id,
# node_name=update_target.node_name,
# symbol=update_target.symbol,
# period_ns=update_target.period_ns,
# publish_topic_names=publish_topic_names,
# callback_object=update_target.callback_object
# )
# # except ValueError:
# # print(f'Failed to find item. {update_target}.')
# @staticmethod
# def _update_sub_cb_publish_topics(
# sub_cbs: List[SubscriptionCallbackValueLttng],
# update_target: SubscriptionCallbackValueLttng,
# publish_topic_names: Tuple[str, ...]
# ) -> None:
# index = sub_cbs.index(update_target)
# sub_cbs[index] = SubscriptionCallbackValueLttng(
# callback_id=update_target.callback_id,
# node_id=update_target.node_id,
# node_name=update_target.node_name,
# symbol=update_target.symbol,
# subscribe_topic_name=update_target.subscribe_topic_name,
# publish_topic_names=publish_topic_names,
# callback_object=update_target.callback_object,
# callback_object_intra=update_target.callback_object_intra
# )
class DataFrameFormatted:
def __init__(self, data: Ros2DataModel):
self._executor_df = self._build_executor_df(data)
self._nodes_df = self._build_nodes_df(data)
self._timer_callbacks_df = self._build_timer_callbacks_df(data)
self._sub_callbacks_df = self._build_sub_callbacks_df(data)
self._srv_callbacks_df = self._build_srv_callbacks_df(data)
self._cbg_df = self._build_cbg_df(data)
self._pub_df = self._build_publisher_df(data)
self._tilde_sub = self._build_tilde_subscription_df(data)
self._tilde_pub = self._build_tilde_publisher_df(data)
self._tilde_sub_id_to_sub = self._build_tilde_sub_id_df(data, self._tilde_sub)
@staticmethod
def _ensure_columns(
df: pd.DataFrame,
columns: List[str],
) -> pd.DataFrame:
df_ = df.copy()
for missing_column in set(columns) - set(df.columns):
df_[missing_column] = np.nan
return df_
@cached_property
def tilde_sub_id_map(self) -> Dict[int, int]:
d: Dict[int, int] = {}
for _, row in self._tilde_sub_id_to_sub.iterrows():
d[row['subscription_id']] = row['tilde_subscription']
return d
@property
def timer_callbacks_df(self) -> pd.DataFrame:
"""
        Get timer callbacks table.
Returns
-------
pd.DataFrame
Column
- callback_object
- node_handle
- timer_handle
- callback_group_addr
- period_ns,
- symbol
- callback_id
"""
return self._timer_callbacks_df
@property
def subscription_callbacks_df(self) -> pd.DataFrame:
"""
        Get subscription callbacks table.
Returns
-------
pd.DataFrame
columns
- callback_object
- callback_object_intra
- node_handle
- subscription_handle
- callback_group_addr
- topic_name
- symbol
- callback_id
- depth
"""
return self._sub_callbacks_df
@property
def nodes_df(self) -> pd.DataFrame:
"""
        Get node table.
Returns
-------
pd.DataFrame
Columns
- node_handle
- node_name
"""
return self._nodes_df
@property
def publishers_df(self) -> pd.DataFrame:
"""
Get publisher info table.
Returns
-------
pd.DataFrame
Columns
- publisher_handle
- node_handle
- topic_name
- depth
"""
return self._pub_df
@property
def services_df(self) -> pd.DataFrame:
"""
Get service info table.
Returns
-------
pd.DataFrame
Columns
- callback_id
- callback_object
- node_handle
- service_handle
- service_name
- symbol
"""
return self._srv_callbacks_df
@property
def executor_df(self) -> pd.DataFrame:
"""
Get executor info table.
Returns
-------
pd.DataFrame
Columns
- executor_addr
- executor_type_name
"""
return self._executor_df
@property
def callback_groups_df(self) -> pd.DataFrame:
"""
Get callback group info table.
Returns
-------
pd.DataFrame
Columns
- callback_group_addr
- executor_addr
- group_type_name
"""
return self._cbg_df
@property
def tilde_publishers_df(self) -> pd.DataFrame:
"""
Get tilde wrapped publisher.
Returns
-------
pd.DataFrame
Columns
- tilde_publisher
- node_name
- topic_name
"""
return self._tilde_pub
@property
def tilde_subscriptions_df(self) -> pd.DataFrame:
"""
Get tilde wrapped subscription.
Returns
-------
pd.DataFrame
Columns
- tilde_subscription
- node_name
- topic_name
"""
return self._tilde_sub
@staticmethod
def _build_publisher_df(
data: Ros2DataModel,
) -> pd.DataFrame:
columns = ['publisher_id', 'publisher_handle', 'node_handle', 'topic_name', 'depth']
df = data.publishers.reset_index()
def to_publisher_id(row: pd.Series):
publisher_handle = row['publisher_handle']
return f'publisher_{publisher_handle}'
df = DataFrameFormatted._add_column(df, 'publisher_id', to_publisher_id)
df = DataFrameFormatted._ensure_columns(df, columns)
return df[columns]
@staticmethod
def _build_executor_df(
data: Ros2DataModel,
) -> pd.DataFrame:
columns = ['executor_id', 'executor_addr', 'executor_type_name']
df = data.executors.reset_index()
df_ = data.executors_static.reset_index()
if len(df_) > 0:
columns_ = columns[1:] # ignore executor_id
df = concat(columns_, [df, df_])
def to_executor_id(row: pd.Series) -> str:
addr = row['executor_addr']
return f'executor_{addr}'
df = DataFrameFormatted._add_column(df, 'executor_id', to_executor_id)
df = DataFrameFormatted._ensure_columns(df, columns)
df = df[columns]
        # data.executors returns duplicate results that differ only in timestamp.
# Remove duplicates to make it unique.
df.drop_duplicates(inplace=True)
return df[columns]
@staticmethod
def _build_cbg_df(
data: Ros2DataModel,
) -> pd.DataFrame:
columns = ['callback_group_id', 'callback_group_addr', 'group_type_name', 'executor_addr']
df = data.callback_groups.reset_index()
df_static = data.callback_groups_static.reset_index()
df_static_exec = data.executors_static.reset_index()
if len(df_static) > 0 and len(df_static_exec) > 0:
df_static = merge(df_static, df_static_exec, 'entities_collector_addr')
columns_ = columns[1:] # ignore callback_group_id
df = concat(columns_, [df, df_static])
def to_callback_group_id(row: pd.Series) -> str:
addr = row['callback_group_addr']
return f'callback_group_{addr}'
df = DataFrameFormatted._add_column(df, 'callback_group_id', to_callback_group_id)
df = DataFrameFormatted._ensure_columns(df, columns)
df = df[columns]
# data.callback_groups returns duplicate results that differ only in timestamp.
# Remove duplicates to make it unique.
df.drop_duplicates(inplace=True)
executor_duplicated_indexes = []
for _, group in df.groupby('callback_group_addr'):
if len(group) >= 2:
                msg = ('Multiple executors using the same callback group were detected. '
                       'The last executor will be used. ')
exec_addr = list(group['executor_addr'].values)
msg += f'executor address: {exec_addr}'
                logger.warning(msg)
executor_duplicated_indexes += list(group.index)[:-1]
if len(executor_duplicated_indexes) >= 1:
df.drop(index=executor_duplicated_indexes, inplace=True)
df.reset_index(drop=True, inplace=True)
return df
@staticmethod
def _build_timer_callbacks_df(
data: Ros2DataModel,
) -> pd.DataFrame:
columns = [
'callback_id', 'callback_object', 'node_handle', 'timer_handle', 'callback_group_addr',
'period_ns', 'symbol',
]
def callback_id(row: pd.Series) -> str:
cb_object = row['callback_object']
return f'timer_callback_{cb_object}'
try:
df = data.timers.reset_index()
timer_node_links_df = data.timer_node_links.reset_index()
df = merge(df, timer_node_links_df, 'timer_handle')
callback_objects_df = data.callback_objects.reset_index().rename(
columns={'reference': 'timer_handle'})
df = merge(df, callback_objects_df, 'timer_handle')
symbols_df = data.callback_symbols
df = merge(df, symbols_df, 'callback_object')
cbg = data.callback_group_timer.reset_index()
df = merge(df, cbg, 'timer_handle')
df = DataFrameFormatted._add_column(df, 'callback_id', callback_id)
df.rename({'period': 'period_ns'}, inplace=True, axis=1)
df = DataFrameFormatted._ensure_columns(df, columns)
return df[columns]
except KeyError:
return pd.DataFrame(columns=columns)
@staticmethod
def _build_sub_callbacks_df(
data: Ros2DataModel,
) -> pd.DataFrame:
columns = [
'callback_id', 'callback_object', 'callback_object_intra', 'node_handle',
'subscription_handle', 'callback_group_addr', 'topic_name', 'symbol', 'depth'
]
def callback_id(row: pd.Series) -> str:
cb_object = row['callback_object']
return f'subscription_callback_{cb_object}'
try:
df = data.subscriptions.reset_index()
callback_objects_df = DataFrameFormatted._format_subscription_callback_object(data)
df = merge(df, callback_objects_df, 'subscription_handle')
symbols_df = data.callback_symbols.reset_index()
df = merge(df, symbols_df, 'callback_object')
cbg = data.callback_group_subscription.reset_index()
df = merge(df, cbg, 'subscription_handle')
df = DataFrameFormatted._add_column(
df, 'callback_id', callback_id
)
df = DataFrameFormatted._ensure_columns(df, columns)
return df[columns].convert_dtypes()
except KeyError:
return pd.DataFrame(columns=columns).convert_dtypes()
@staticmethod
def _build_srv_callbacks_df(
data: Ros2DataModel,
) -> pd.DataFrame:
columns = [
'callback_id', 'callback_object', 'node_handle',
'service_handle', 'service_name', 'symbol'
]
def callback_id(row: pd.Series) -> str:
cb_object = row['callback_object']
return f'service_callback_{cb_object}'
try:
df = data.services.reset_index()
callback_objects_df = data.callback_objects.reset_index().rename(
{'reference': 'service_handle'}, axis=1)
df = merge(df, callback_objects_df, 'service_handle')
symbols_df = data.callback_symbols.reset_index()
df = merge(df, symbols_df, 'callback_object')
df = DataFrameFormatted._add_column(
df, 'callback_id', callback_id
)
df = DataFrameFormatted._ensure_columns(df, columns)
return df[columns]
except KeyError:
return pd.DataFrame(columns=columns)
@staticmethod
def _build_tilde_subscription_df(
data: Ros2DataModel,
) -> pd.DataFrame:
columns = ['tilde_subscription', 'node_name', 'topic_name']
try:
df = data.tilde_subscriptions.reset_index()
df.rename({'subscription': 'tilde_subscription'}, axis=1, inplace=True)
df = DataFrameFormatted._ensure_columns(df, columns)
return df[columns]
except KeyError:
return pd.DataFrame(columns=columns)
@staticmethod
def _build_tilde_publisher_df(
data: Ros2DataModel,
) -> pd.DataFrame:
columns = ['tilde_publisher', 'tilde_subscription', 'node_name', 'topic_name']
try:
df = data.tilde_publishers.reset_index()
df.rename({'publisher': 'tilde_publisher'}, axis=1, inplace=True)
df = DataFrameFormatted._ensure_columns(df, columns)
return df[columns]
except KeyError:
return | pd.DataFrame(columns=columns) | pandas.DataFrame |
#!/usr/bin/env python3
from datetime import datetime, timedelta
import sys
import json
import re
import pandas as pd
LOGFORMAT="/var/log/nsd/nsd-dnstap.log.%Y%m%d-%H"
def read_data(data):
if isinstance(data, list) and not data:
sys.stderr.write("No valid input supplied!\n")
sys.exit(-1)
ids = []
for datum in data:
datum['ts'] = datetime.utcfromtimestamp(datum['ts'])
ids.append(datum['id'])
if len(set(ids)) != len(ids):
# this means that same probe was queried and failed!
#sys.stderr.write("Double ID detected!\n")
pass
return (data, ids)
def read_dnstap_log(path, logls):
try:
with open(path, 'r') as f:
for line in f:
logls.append(json.loads(line))
except FileNotFoundError:
sys.stderr.write("Could not find {}\n".format(path))
except json.decoder.JSONDecodeError:
sys.stderr.write("Could not parse {}\n".format(path))
def parse_normal(query_pieces):
probe_id = query_pieces[2]
if not len(probe_id):
probe_id = query_pieces[3]
x = query_pieces[3].split('.')
rslv_type = x[0]
#if len(x) < 2:
# import IPython; IPython.embed()
# sys.exit(0)
try:
if 'x' not in x[1]:
mtu = x[1]
else:
mtu = x[2]
except IndexError:
mtu = query_pieces[4].split('.')[1]
return (probe_id, rslv_type, mtu)
def parse_2(query_pieces):
probe_id = query_pieces[0]
pieces = probe_id.split('.')
if len(pieces) != 1:
probe_id = pieces[1]
return probe_id
def check_query(query, datum):
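    # Query names are expected to encode the probe id, resolver type and MTU as
    # '-'-separated labels (see parse_normal); shorter names fall back to
    # looser parsing and anything else is reported as an unknown query.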
prog = re.compile('[0-9]{4}')
query = query.lower()
query_pieces = query.split('-')
if len(query_pieces) >= 4:
probe_id, rslv_type, mtu = parse_normal(query_pieces)
elif len(query_pieces) == 2:
probe_id = parse_2(query_pieces)
rslv_type = None
mtu = None
elif len(query_pieces) == 1:
try:
m = prog.search(query_pieces[0])
probe_id = m.group()
except AttributeError:
probe_id = None
rslv_type = None
mtu = None
else:
sys.stderr.write("Unknown query found!\n")
sys.stderr.write(query)
#sys.exit(0)
probe_id = None
rslv_type = None
mtu = None
datum.append(probe_id)
datum.append(rslv_type)
datum.append(mtu)
def create_dataframe(logls):
data = []
for log in logls:
datum = []
# time
datum.append(log['message']['query_time'])
# address
datum.append(log['message']['query_address'])
# protocol
datum.append(log['message']['socket_protocol'])
#query
qm = log['message']['query_message']
try:
query = qm.split('QUESTION SECTION:\n;')[1].split('\t')[0].lower()
except IndexError:
# no proper query supplied, invalid
continue
datum.append(query)
# probe_id, type, mtu
check_query(query,datum)
# record
try:
datum.append(qm.split('IN\t ')[1].split('\n')[0])
except IndexError:
datum.append(None)
# EDNS_buffer_size
try:
edns_udp = qm.split('udp: ')
if len(edns_udp) == 1:
# NO BUFFER SIZE
raise IndexError
buf_size = edns_udp[-1].split('\n')[0]
buf_pieces = buf_size.split('id: ')
if len(buf_pieces) > 1:
buf_size = buf_pieces[1]
datum.append(buf_size)
except IndexError:
# no EDNS(0)
datum.append(None)
datum.append(query.split('.')[0].lower())
data.append(datum)
return data
def load_stub_dnstap(results, rslv_type, ip_type):
logls = []
logpaths = []
for result in results:
logpaths.append(result['ts'].strftime(LOGFORMAT))
logpaths = set(logpaths)
for path in logpaths:
read_dnstap_log(path, logls)
data = create_dataframe(logls)
    # don't name any column 'query' -- it clashes with DataFrame.query
columns = ['time','address','protocol','dns_query','probe_id',
'resolver_type','MTU','record','EDNS_buffer_size',
'variable_section']
df = pd.DataFrame(data, columns=columns)
df = df[~df['dns_query'].str.contains('x')]
df['time'] = pd.to_datetime(df['time'])
df['probe_id'] = pd.to_numeric(df['probe_id'], errors='coerce')
#df['MTU'] = df['MTU'].astype(int)
ip = 'A' if ip_type == 4 else 'AAAA'
df = df[df['resolver_type'] == rslv_type]
df = df[df['record'] == ip]
#import IPython; IPython.embed()
return df
def datetime_range(begin, end, step=timedelta(hours=1)):
span = end - begin
dt = timedelta(0)
while dt < span:
yield begin + dt
dt += step
# log fetcher that does not rely on Atlas results; largely duplicates load_stub_dnstap
def load_rslv_dnstap(args, ip, resolver):
logls = []
logpaths = []
for dt in datetime_range(datetime.fromisoformat(args.start),
datetime.fromisoformat(args.stop)):
logpaths.append(dt.strftime(LOGFORMAT))
logpaths = list(set(logpaths))
for path in logpaths:
read_dnstap_log(path, logls)
data = create_dataframe(logls)
    # don't name any column 'query' -- it clashes with DataFrame.query
columns = ['time','address','protocol','dns_query','probe_id',
'resolver_type','MTU','record','EDNS_buffer_size',
'variable_section']
df = pd.DataFrame(data, columns=columns)
df['time'] = | pd.to_datetime(df['time']) | pandas.to_datetime |
import pandas as pd
from pandas.testing import assert_frame_equal
from evaluate.report import (
PrecisionReport,
RecallReport,
Report,
DelimNotFoundError,
ReturnTypeDoesNotMatchError
)
from evaluate.classification import AlignmentAssessment
import pytest
from io import StringIO
import math
from tests.common import create_tmp_file, create_recall_report_row, create_precision_report_row
from unittest.mock import patch
class TestReport:
def test___get_report_satisfying_confidence_threshold(self):
report = Report([
pd.read_csv(StringIO(
"""id,GT_CONF
0,2
1,1
2,3
""")),
pd.read_csv(StringIO(
"""id,GT_CONF
4,3
5,1
6,2
"""))
])
actual_report = report.get_report_satisfying_confidence_threshold(2)
expected_report = Report([
pd.read_csv(StringIO(
"""id,GT_CONF
0,2
2,3
4,3
6,2
"""))])
assert actual_report==expected_report
def test___get_value_from_header_fast___field_is_in_header(self):
actual_value = Report.get_value_from_header_fast("FIELD_1=10;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_in_header_between_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_first_before_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("FIELD_1=10;DUMMY_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_last_after_two_other_fields(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;DUMMY_2=99;FIELD_1=10;", "FIELD_1", int, -1, delim=";")
expected_value = 10
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_not_in_header(self):
actual_value = Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=10;DUMMY_2=99;", "FIELD_2", int, -1, delim=";")
expected_value = -1
assert actual_value==expected_value
def test___get_value_from_header_fast___field_is_in_header___return_type_does_not_match(self):
with pytest.raises(ReturnTypeDoesNotMatchError):
Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim=";")
def test___get_value_from_header_fast___field_is_in_header___delim_is_not(self):
with pytest.raises(DelimNotFoundError):
Report.get_value_from_header_fast("DUMMY_1=asd;FIELD_1=asd;DUMMY_2=99;", "FIELD_1", int, -1, delim="~")
def test____create_field_from_header(self):
report = Report([
pd.read_csv(StringIO(
"""id,header
1,SEQ=ACGT;LEN=4;
2,SEQ=TG;LEN=2;
3,dummy
"""))])
report._create_field_from_header("SEQ", "header", str, "A")
report._create_field_from_header("LEN", "header", int, 1)
expected_report = Report([
pd.read_csv(StringIO(
"""id,header,SEQ,LEN
1,SEQ=ACGT;LEN=4;,ACGT,4
2,SEQ=TG;LEN=2;,TG,2
3,dummy,A,1
"""))])
assert report==expected_report
def test____create_good_eval_column(self):
report = Report([
pd.read_csv(StringIO(
"""classification
primary_correct
whatever
secondary_correct
dummy
supplementary_correct
woot
"""))])
report._create_good_eval_column()
expected_report = Report([
pd.read_csv(StringIO(
"""classification,good_eval
primary_correct,True
whatever,False
secondary_correct,True
dummy,False
supplementary_correct,True
woot,False
"""))])
assert report==expected_report
def test_getMaximumGtConf_no_gt_conf_columnRaisesKeyError(self):
report = Report([pd.DataFrame()])
with pytest.raises(KeyError):
report.get_maximum_gt_conf()
def test_getMaximumGtConf_emptyReportReturnsNaN(self):
report = Report([pd.DataFrame(data={"GT_CONF": []})])
actual = report.get_maximum_gt_conf()
assert math.isnan(actual)
def test_getMaximumGtConf_oneGTConfInReportReturnsGTConf(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5]})])
actual = report.get_maximum_gt_conf()
expected = 1.5
assert actual == expected
def test_getMaximumGtConf_threeGTConfsInReportReturnsHighest(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5, 10.5, 5.0]})])
actual = report.get_maximum_gt_conf()
expected = 10.5
assert actual == expected
def test_getMinimumGtConf_no_gt_conf_columnRaisesKeyError(self):
report = Report([pd.DataFrame()])
with pytest.raises(KeyError):
report.get_minimum_gt_conf()
def test_getMinimumGtConf_emptyReportReturnsNaN(self):
report = Report([pd.DataFrame(data={"GT_CONF": []})])
actual = report.get_minimum_gt_conf()
assert math.isnan(actual)
def test_getMinimumGtConf_oneGTConfInReportReturnsGTConf(self):
report = Report([pd.DataFrame(data={"GT_CONF": [1.5]})])
actual = report.get_minimum_gt_conf()
expected = 1.5
assert actual == expected
    def test_getMinimumGtConf_threeGTConfsInReportReturnsLowest(self):
report = Report([pd.DataFrame(data={"GT_CONF": [10.5, 5.0, 0.2]})])
actual = report.get_minimum_gt_conf()
expected = 0.2
assert actual == expected
class TestPrecisionReporter:
def test_init_gtconfIsExtractedCorrectly(self):
columns = ["sample", "query_probe_header", "ref_probe_header", "classification"]
dfs = pd.DataFrame(
data=[
create_precision_report_row(0.0, gt_conf=100),
create_precision_report_row(0.0, gt_conf=100),
create_precision_report_row(0.0, gt_conf=10),
create_precision_report_row(0.0, gt_conf=100),
],
columns=columns,
)
report = PrecisionReport([dfs])
actual = report.report.GT_CONF
expected = pd.Series([100.0, 100.0, 10.0, 100.0])
assert actual.equals(expected)
def test_fromFiles_TwoFilesReturnsValidRecallReport(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3; >GT_CONF=3; unmapped
"""
contents_2 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1260;IV=[50,60);PVID=4;NB_ALL=4;ALL_ID=4;NB_DIFF_ALL_SEQ=4;ALL_SEQ_ID=4; >CHROM=GC00000578_3;SAMPLE=CFT073;POS=165;IV=[25,29);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=3;GT_CONF=3.22199; primary_incorrect
CFT073 >CHROM=1;POS=1262;IV=[60,70);PVID=5;NB_ALL=5;ALL_ID=5;NB_DIFF_ALL_SEQ=5;ALL_SEQ_ID=5; >GT_CONF=5; unmapped
CFT073 >CHROM=1;POS=1281;IV=[70,80);PVID=6;NB_ALL=6;ALL_ID=6;NB_DIFF_ALL_SEQ=6;ALL_SEQ_ID=6; >GT_CONF=6; unmapped
"""
path_1 = create_tmp_file(contents_1)
path_2 = create_tmp_file(contents_2)
contents_1_input = StringIO(contents_1)
contents_2_input = StringIO(contents_2)
dataframes = [
pd.read_csv(contents_1_input, sep="\t", keep_default_na=False),
pd.read_csv(contents_2_input, sep="\t", keep_default_na=False),
]
actual = PrecisionReport.from_files([path_1, path_2])
expected = PrecisionReport(dataframes)
path_1.unlink()
path_2.unlink()
assert actual == expected
class TestRecallReport:
def test_fromFiles_TwoFilesReturnsValidRecallReport(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3; >GT_CONF=3; unmapped
"""
contents_2 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1260;IV=[50,60);PVID=4;NB_ALL=4;ALL_ID=4;NB_DIFF_ALL_SEQ=4;ALL_SEQ_ID=4; >CHROM=GC00000578_3;SAMPLE=CFT073;POS=165;IV=[25,29);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=3;GT_CONF=3.22199; primary_incorrect
CFT073 >CHROM=1;POS=1262;IV=[60,70);PVID=5;NB_ALL=5;ALL_ID=5;NB_DIFF_ALL_SEQ=5;ALL_SEQ_ID=5; >GT_CONF=5; unmapped
CFT073 >CHROM=1;POS=1281;IV=[70,80);PVID=6;NB_ALL=6;ALL_ID=6;NB_DIFF_ALL_SEQ=6;ALL_SEQ_ID=6; >GT_CONF=6; unmapped
"""
path_1 = create_tmp_file(contents_1)
path_2 = create_tmp_file(contents_2)
contents_1_input = StringIO(contents_1)
contents_2_input = StringIO(contents_2)
dataframes = [
pd.read_csv(contents_1_input, sep="\t", keep_default_na=False),
pd.read_csv(contents_2_input, sep="\t", keep_default_na=False),
]
actual = RecallReport.from_files([path_1, path_2])
expected = RecallReport(dataframes)
path_1.unlink()
path_2.unlink()
assert actual == expected
def test_init(self):
contents_1 = """sample query_probe_header ref_probe_header classification
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1;NB_OF_SAMPLES=10; >GT_CONF=1; unmapped
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2;NB_OF_SAMPLES=20; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3;NB_OF_SAMPLES=30; >GT_CONF=3; unmapped
"""
contents_1_input = StringIO(contents_1)
dataframes = [pd.read_csv(contents_1_input, sep="\t", keep_default_na=False)]
report = RecallReport(dataframes)
actual_df = report.report
expected_df = pd.read_csv(StringIO(
"""sample query_probe_header ref_probe_header classification GT_CONF PVID NB_ALL ALL_ID NB_DIFF_ALL_SEQ ALL_SEQ_ID NB_OF_SAMPLES good_eval
CFT073 >CHROM=1;POS=1246;IV=[20,30);PVID=1;NB_ALL=1;ALL_ID=1;NB_DIFF_ALL_SEQ=1;ALL_SEQ_ID=1;NB_OF_SAMPLES=10; >GT_CONF=1; unmapped 1.0 1 1 1 1 1 10 False
CFT073 >CHROM=1;POS=1248;IV=[30,40);PVID=2;NB_ALL=2;ALL_ID=2;NB_DIFF_ALL_SEQ=2;ALL_SEQ_ID=2;NB_OF_SAMPLES=20; >CHROM=GC00005358_3;SAMPLE=CFT073;POS=1;IV=[0,17);SVTYPE=PH_SNPs;MEAN_FWD_COVG=3;MEAN_REV_COVG=6;GT_CONF=60.1133; primary_correct 60.1133 2 2 2 2 2 20 True
CFT073 >CHROM=1;POS=1252;IV=[40,50);PVID=3;NB_ALL=3;ALL_ID=3;NB_DIFF_ALL_SEQ=3;ALL_SEQ_ID=3;NB_OF_SAMPLES=30; >GT_CONF=3; unmapped 3.0 3 3 3 3 3 30 False
"""), sep="\t")
assert actual_df.equals(expected_df)
def test_checkIfOnlyBestMappingIsKept_hasPrimaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasSecondaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasSupplementaryMapping(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_CORRECT, gt_conf=100, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_CORRECT, gt_conf=100, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_ChoosesTheOneWithHighestGTConf(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_CORRECT, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=200, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_CORRECT, gt_conf=150, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_CORRECT, gt_conf=200, with_gt_conf=True)])
assert_frame_equal(actual, expected, check_dtype=False)
def test_checkIfOnlyBestMappingIsKept_hasNoCorrectMapping_ChoosesTheOneWithHighestGTConf(self):
dfs = pd.DataFrame(
data=[
create_recall_report_row("truth_probe_1", AlignmentAssessment.UNMAPPED, gt_conf=100, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PARTIALLY_MAPPED, gt_conf=140, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SECONDARY_INCORRECT, gt_conf=110, with_gt_conf=True),
create_recall_report_row("truth_probe_1", AlignmentAssessment.SUPPLEMENTARY_INCORRECT, gt_conf=120, with_gt_conf=True),
],
)
report = RecallReport([dfs])
actual = report.report
expected = pd.DataFrame(data=[create_recall_report_row("truth_probe_1", AlignmentAssessment.PRIMARY_INCORRECT, gt_conf=150, with_gt_conf=True)])
| assert_frame_equal(actual, expected, check_dtype=False) | pandas.testing.assert_frame_equal |
"""
2a. Modelling folds
====================
This tutorial will show how Loop Structural improves the modelling of
folds by using an accurate parameterization of folds geometry. This will
be done by: 1. Modelling folded surfaces without structural geology,
i.e. using only data points and adjusting the scalar fields to those
points. 2. Modelling folds using structural geology, which includes: \*
Description of local fold frame and rotation angles calculation \*
Construction of folded foliations using fold geostatistics inside the
fold frame coordinate system
"""
######################################################################
# Imports
# -------
#
from LoopStructural import GeologicalModel
from LoopStructural.datasets import load_noddy_single_fold
from LoopStructural.visualisation import LavaVuModelViewer, RotationAnglePlotter
from LoopStructural.utils.helper import strike_dip_vector, plunge_and_plunge_dir_to_vector
import pandas as pd
import numpy as np
from scipy.interpolate import Rbf
import matplotlib.pyplot as plt
######################################################################
#
#
######################################################################
# Structural geology of folds
# ---------------------------
#
######################################################################
# Folds are one of the most common features found in deformed rocks and
# are defined by the location of higher curvature. The geometry of the
# folded surface can be characterised by three geometrical elements:
#
# 1. the fold hinge is the point of maximum curvature along the folded
#    surface
# 2. the axial surface is a surface that passes through the points of
#    maximum curvature in all folded foliations
# 3. the fold axis is the intersection of the folded foliation and the
# axial surface
#
# Modelling folded surfaces using standard implicit algorithms is
# challenging because the implicit modelling methods are generally trying
# to minimise the resulting curvature of the surface. To model folded
# surfaces the geologist will need to characterise the geometry of the
# folded surface in high detail.
#
#
#
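######################################################################
# As a small, self-contained illustration (not part of the original
# workflow, and using made-up vectors): because the fold axis is the
# intersection of the folded foliation and the axial surface, it can be
# computed as the normalised cross product of the two unit normals.
#

s0_normal = np.array([0.17, 0.0, 0.98])  # assumed normal to the folded foliation
s1_normal = np.array([0.98, 0.0, 0.17])  # assumed normal to the axial surface
fold_axis = np.cross(s0_normal, s1_normal)
fold_axis /= np.linalg.norm(fold_axis)
print('illustrative fold axis direction:', fold_axis)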
######################################################################
# Modelling folded surfaces without structural geology
# ----------------------------------------------------
#
# In the following section we will attempt to model a synthetic fold shape
# that is defined by a sinusoidal folded surface. For simplicity we will
# consider the fold as cylindrical and therefore only consider the fold in
# a 2D plane. The data set has been sampled from a model generated using
# Noddy and represents a fold with a wavelength of ~10km and amplitude of
# ~2km.
#
# The orientation of the structure has been sampled within the model
# volume (10km,7km,5km) at 500m intervals.
#
# **The aim of this exercise is to investigate how standard implicit
# modelling techniques are fundamentally limited when trying to model
# folded surfaces.**
#
# 1. Load data from sample datasets
# 2. Visualise data
# 3. Look at varying degrees of sampling e.g. 200 points, 100 points, 10
# points.
# 4. Look at using data points ONLY from a map surface
#
######################################################################
# Modelling folded surfaces using loop structural
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# load the sample data
data, boundary_points = load_noddy_single_fold()
data.head()
######################################################################
# The input dataset was generated using Noddy by sampling the orientation
# of a structure on a regular grid. We have loaded it into a pandas
# DataFrame, this is basically an excel spreadsheet for python. Above are
# the first 5 rows of the dataset and as we can see it is regularly
# sampled with data points being sampled regularly along the :math:`z`,
# :math:`y` and :math:`x` axes. In order to avoid artefacts due to the
# sampling errors we will shuffle the data. We can do this using the
# ``random`` column in the DataFrame (ensuring everyone has the same
# data).
#
data = data.sort_values('random') # sort the data by a random int then we can select N random points
data.head()
######################################################################
# The data points are now randomly ordered and can now be subsampled by
# choosing the first N samples from the dataframe
#
# .. code:: python
#
# data[:100]
#
# returns the first 100 data points from the array
#
######################################################################
# Testing data density
# ~~~~~~~~~~~~~~~~~~~~
#
# - Use the toggle bar to change the amount of data used by the
# interpolation algorithm.
# - How does the shape of the fold change as we remove data points?
# - Now what happens if we only consider data from the map view?
#
# **HINT** you can view the strike and dip data by unchecking the scalar
# field box.
#
# **The black arrows are the normal vector to the folded surface**
#
npoints = 20
model = GeologicalModel(boundary_points[0,:],boundary_points[1,:])
model.set_model_data(data[:npoints])
stratigraphy = model.create_and_add_foliation("s0",interpolatortype="PLI",nelements=5000,buffer=0.3,cgw=0.1)#.2)
viewer = LavaVuModelViewer(model,background="white")
# viewer.add_scalar_field(model.bounding_box,(38,55,30),
# 'box',
# paint_with=stratigraphy,
# cmap='prism')
viewer.add_data(stratigraphy)
viewer.add_isosurface(stratigraphy,
)
viewer.rotate([-85.18760681152344, 42.93233871459961, 0.8641873002052307])
viewer.display()
######################################################################
# Modelling folds using structural geology
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The following section will describe how the fold axis, fold axial
# surface and fold vergence can be used to help constrain the shape of the
# folded surface. To do this we need to build a fold frame which is
# curvilinear coordinate system based around the fold axis and the fold
# axial surface.
#
# There are three coordinates to the fold frame: \* coordinate 0 is the
# axial surface of the fold and is parallel to the axial foliation \*
# coordinate 1 is the fold axis direction field and is orthogonal to the
# axial foliation \* coordinate 2 is orthogonal to both the fold axis
# direction field and axial foliation and is roughly parallel to the
# extension direction of the fold
#
# Three direction vectors are defined by the normalised gradient of these
# fields: \* :math:`e_0` - red \* :math:`e_1` - green \* :math:`e_2` -
# blue
#
# The orientation of the fold axis is defined by rotating
# :math:`e_1` around :math:`e_0` by the fold axis rotation angle
# :math:`\alpha_P`. The orientation of the folded foliation is then
# defined by rotating the plane spanned by the fold axis and :math:`e_0`
# around the fold axis by the fold limb rotation angle :math:`\alpha_L`.
#
# Calculating the fold rotation angles
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The rotation angles can be calculated from observations of the folded
# foliation and associated lineations. For example, the fold axis rotation
# angle is found by calculating the angle between the gradient of the fold
# axis direction field and the intersection lineations shown in A). The
# fold limb rotation angle is found by finding the angle to rotate the
# folded foliation to be parallel to the plane of the axial foliation
# shown in B and C.
#
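######################################################################
# A minimal sketch (with invented vectors, not values taken from the
# model) of the fold axis rotation described above: :math:`\alpha_P` is
# the angle between :math:`e_1` and an observed intersection lineation,
# and rotating :math:`e_1` around :math:`e_0` by that angle (Rodrigues'
# rotation formula) recovers the fold axis direction.
#

def rotate_about_axis(v, axis, angle):
    # Rodrigues' rotation of vector v around a unit axis by angle (radians).
    axis = axis / np.linalg.norm(axis)
    return (v * np.cos(angle)
            + np.cross(axis, v) * np.sin(angle)
            + axis * np.dot(axis, v) * (1 - np.cos(angle)))

e0 = np.array([1.0, 0.0, 0.0])          # assumed gradient of fold frame coordinate 0
e1 = np.array([0.0, 1.0, 0.0])          # assumed gradient of fold frame coordinate 1
lineation = np.array([0.0, 0.87, 0.5])  # assumed intersection lineation observation
lineation = lineation / np.linalg.norm(lineation)
alpha_p = np.arccos(np.clip(np.dot(e1, lineation), -1.0, 1.0))
print(np.degrees(alpha_p), rotate_about_axis(e1, e0, alpha_p))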
mdata = | pd.concat([data[:npoints],data[data['feature_name']=='s1']]) | pandas.concat |
#! python
import os
import pandas as pd
BASEDIR = os.path.dirname(__file__)
WEATHERFILE = os.path.join(BASEDIR, 'onemin-WS_1-2017')
GROUNDFILE = os.path.join(BASEDIR, 'onemin-Ground-2017')
EASTERN_TZ = 'Etc/GMT+5'
# LATITUDE, LONGITUDE = 39.1374, -77.2187 # weather station
# LATITUDE, LONGITUDE = 39.1319, -77.2141 # ground array
HORIZON_ZENITH = 90.0 # degrees
GHI_THRESH = 0 # [W/m^2]
# read in data from NIST, parse times, and set them as the indices
ws_data = []
gnd_data = []
for month in range(12):
ws_file = os.path.join(WEATHERFILE,
'onemin-WS_1-2017-{:02d}.csv'.format(month+1))
gnd_file = os.path.join(GROUNDFILE,
'onemin-Ground-2017-{:02d}.csv'.format(month+1))
ws_data.append(pd.read_csv(ws_file, index_col='TIMESTAMP',
parse_dates=True))
gnd_data.append(pd.read_csv(gnd_file, index_col='TIMESTAMP',
parse_dates=True))
ws_data = pd.concat(ws_data)
gnd_data = | pd.concat(gnd_data) | pandas.concat |
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_vulnerabilidad.ipynb (unless otherwise specified).
__all__ = ['show_feature_importances', 'mostrar_coeficientes_PLS', 'agregar_conteo_pruebas',
'agregar_tasas_municipales', 'caracteristicas_modelos_municipios', 'ajustar_pls_letalidad',
'ajustar_pls_columna', 'ajustar_rf_letalidad', 'calificar_municipios_letalidad',
'calificar_municipios_letalidad_formato_largo', 'seleccionar_caracteristicas',
'calcular_indices_vulnerabilidad', 'calcular_indices_vulnerabilidad_formato_largo',
'calcular_periodo_vulnerabilidad', 'periodo_vulnerabilidad_con_dataframe',
'calcular_periodo_vulnerabilidad_2', 'agregar_periodo_vulnerabilidad', 'calcular_vulnerabilidad_urbana',
'agregar_vulnerabilidad_entidades', 'guardar_resultados_csv', 'mapas_serie_letalidad',
'mapas_serie_vulnerabilidad', 'guardar_shape_vulnerabilidad', 'checkpoint_vulnerabilidad',
'cargar_checkpoint_vulnerabilidad']
# Cell
from ipywidgets import IntProgress
from IPython.display import display
import time
from .datos import *
import pandas as pd
import geopandas as gpd
import glob
import os
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from datetime import timedelta
import datetime
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.cross_decomposition import PLSRegression
import imageio
from pygifsicle import optimize
# Cell
def show_feature_importances(rf, X):
importances = rf.feature_importances_
std = np.std([tree.feature_importances_ for tree in rf.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %s (%f)" % (f + 1, X.columns[indices[f]], importances[indices[f]]))
# Plot the feature importances of the forest
f, ax = plt.subplots(figsize=(15, 10))
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), X.columns[indices])
plt.xlim([-1, X.shape[1]])
plt.show()
# Cell
def mostrar_coeficientes_PLS(pls, nombres_caracteristicas):
    # Build a coefficient table from the fitted PLS model and plot it.
    feats_df = pd.DataFrame({'nombre': nombres_caracteristicas,
                             'coef': np.ravel(pls.coef_)})
    f, ax = plt.subplots(figsize=(15, 10))
    sns.barplot(y='nombre', x='coef', data=feats_df.sort_values('coef'), color='c')
# Cell
def agregar_conteo_pruebas(covid_municipal, solo_covid=True):
cols_localidad = ['ENTIDAD_RES', 'MUNICIPIO_RES', 'CLAVE_MUNICIPIO_RES']
count_tested = covid_municipal[cols_localidad + ['conteo']].groupby(cols_localidad).sum()
count_tested.reset_index(inplace=True)
count_tested.rename(columns={'conteo': 'total_pruebas'}, inplace=True)
covid_municipal = covid_municipal.merge(count_tested, on=cols_localidad, how='left')
covid_municipal['casos_frac'] = 100 * covid_municipal['conteo'] / covid_municipal['total_pruebas']
covid_municipal['tasa_covid_letal'] = 100 * covid_municipal['defunciones'] / covid_municipal['conteo']
if solo_covid:
covid_municipal = covid_municipal.query('RESULTADO == "Positivo SARS-CoV-2"')
return covid_municipal
# Cell
def agregar_tasas_municipales(casos_mun_df):
casos_mun_df['covid_confirmados_100k'] = 100000 * casos_mun_df['conteo'] / casos_mun_df['pob2020']
casos_mun_df['covid_defun_100k'] = 100000 * casos_mun_df['defunciones'] / casos_mun_df['pob2020']
casos_mun_df['tasa_covid_letal'] = 100 * casos_mun_df['defunciones'] / casos_mun_df['conteo']
# covid_municipal = covid_municipal.query('RESULTADO == "Positivo SARS-CoV-2"').copy()
# casos_mun_df = gpd.GeoDataFrame(casos_mun_df, geometry='geometry')
return casos_mun_df
# Cell
def caracteristicas_modelos_municipios(mun_df, poblaciones=False, i_vuln=False):
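    # Select the numeric indicator columns to use as model features, excluding
    # (unless requested) population counts and precomputed vulnerability
    # indices, as well as COVID outcome and identifier columns.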
pob_vars = []
if not poblaciones:
pob_vars = list(mun_df.columns[mun_df.columns.str.contains('_pob')])
pob_vars = pob_vars + ['mayores_65', 'pob2020', 'pt_2015', 'pob_menore', 'poblacion', 'pob_total']
pob_vars = pob_vars + ['sin_dere_1', 'sin_derech', 'carencias_']
i_vuln_vars = []
if not i_vuln:
i_vuln_vars = ['i_vuln_salud', 'i_vuln_cobertura', 'i_vuln_econo', 'i_vuln_social',
'i_vuln_gen', 'i_vuln_infraestructura']
columnas_numericas = mun_df.select_dtypes(include=np.number).columns
otras_vars = ['covid_defun_100k', 'tasa_covid_letal', 'defunciones',
'INTUBADO_BIN', 'UCI_BIN', 'UCIs', 'tasa_uci', 'total_pruebas',
'area_cart', 'area', 'densi', 'casos_frac', 'area_km2', 'conteo',
'oid', 'covid_confrimados_100k', 'id', 'vulnerabilidad_ambiental_num',
'covid_confirmados_100k', 'index']
caracteristicas = list(set(columnas_numericas).difference(pob_vars + i_vuln_vars + otras_vars))
return caracteristicas
# Cell
def ajustar_pls_letalidad(municipios_df, caracteristicas, min_casos=20, min_defunciones=0):
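    """Fit a one-component PLS regression of 'tasa_covid_letal' on the given features,
    restricted to municipalities above the case/death thresholds."""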
data_train = municipios_df.loc[municipios_df[caracteristicas].notna().all(axis=1)]
X = data_train.query(f'(conteo > {min_casos}) & (defunciones > {min_defunciones})')[caracteristicas]
Y = data_train.query(f'(conteo > {min_casos}) & (defunciones > {min_defunciones})')['tasa_covid_letal']
# X['i_vuln_econo'] = -X['i_vuln_econo']
pls2 = PLSRegression(n_components=1)
pls2.fit(X, Y)
pls2.coef_ = pls2.coef_.flatten()
return pls2
# Cell
def ajustar_pls_columna(municipios_df, caracteristicas, columna, min_casos=20, min_defunciones=0):
data_train = municipios_df.loc[municipios_df[caracteristicas].notna().all(axis=1)]
X = data_train.query(f'(conteo > {min_casos}) & (defunciones > {min_defunciones})')[caracteristicas]
    try:
        Y = data_train.query(f'(conteo > {min_casos}) & (defunciones > {min_defunciones})')[columna]
    except KeyError:
        print(f"Column {columna} does not exist")
        raise
# X['i_vuln_econo'] = -X['i_vuln_econo']
pls2 = PLSRegression(n_components=1)
pls2.fit(X, Y)
pls2.coef_ = pls2.coef_.flatten()
return pls2
# Cell
def ajustar_rf_letalidad(municipios_df, caracteristicas, min_casos=20, min_defunciones=0, random_seed=0):
data_train = municipios_df.loc[municipios_df[caracteristicas].notna().all(axis=1)]
X = data_train.query(f'(conteo > {min_casos}) & (defunciones > {min_defunciones})')[caracteristicas]
Y = data_train.query(f'(conteo > {min_casos}) & (defunciones > {min_defunciones})')['tasa_covid_letal']
# X['i_vuln_econo'] = -X['i_vuln_econo']
rf = RandomForestRegressor(random_state=random_seed)
rf.fit(X, Y)
return rf
# Cell
def calificar_municipios_letalidad(mun_df, regr, caracteristicas, etiqueta):
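    """Score every municipality with complete features using the fitted model and
    store the predictions in the column named by `etiqueta`."""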
not_na_row = mun_df[caracteristicas].notnull().all(axis=1)
X = mun_df.loc[not_na_row, caracteristicas]
Y_pred = regr.predict(X)
mun_df.loc[not_na_row, etiqueta] = Y_pred
return mun_df
# Cell
def calificar_municipios_letalidad_formato_largo(mun_df, regr, caracteristicas, modelo, dia_ajuste):
mun_df = mun_df.copy()
not_na_row = mun_df[caracteristicas].notnull().all(axis=1)
X = mun_df.loc[not_na_row, caracteristicas]
Y_pred = regr.predict(X)
mun_df['modelo'] = modelo
mun_df['dia_ajuste'] = dia_ajuste
mun_df.loc[not_na_row, 'valor'] = Y_pred
return mun_df
# Cell
def seleccionar_caracteristicas(regr, X, caracteristicas):
sel = SelectFromModel(regr, prefit=True)
caracteristicas_selec = list(X[caracteristicas].columns[sel.get_support()])
return caracteristicas_selec
# Cell
def calcular_indices_vulnerabilidad(fecha_0, fecha_1, vulnerabilidad='fecha_0'):
covid_municipal_0 = tabla_covid_indicadores_municipales(fecha_0)
covid_municipal_0 = agregar_tasas_municipales(covid_municipal_0)
covid_municipal_1 = tabla_covid_indicadores_municipales(fecha_1)
covid_municipal_1 = agregar_tasas_municipales(covid_municipal_1)
caracteristicas = caracteristicas_modelos_municipios(covid_municipal_0)
resultados = covid_municipal_1.copy()
# rf = ajustar_rf_municipios(covid_municipal_0, fecha_0, caracteristicas)
# caracteristicas_rf = seleccionar_caracteristicas(rf, covid_municipal_0, caracteristicas)
# resultados = calificar_municipios_letalidad(resultados, rf, caracteristicas, etiqueta='i_RF_0')
pls = ajustar_pls_letalidad(covid_municipal_0, caracteristicas)
resultados = calificar_municipios_letalidad(resultados, pls, caracteristicas, etiqueta='i_PLS_0')
pls = ajustar_pls_letalidad(covid_municipal_1, caracteristicas)
resultados = calificar_municipios_letalidad(resultados, pls, caracteristicas, etiqueta='i_PLS_1')
# pls = ajustar_pls_letalidad(covid_municipal_0, caracteristicas_rf)
# resultados = calificar_municipios_letalidad(resultados, pls, caracteristicas, etiqueta='i_PLS_RF_1')
return resultados
# Cell
def calcular_indices_vulnerabilidad_formato_largo(fecha_0, fecha_1):
covid_municipal_0 = tabla_covid_indicadores_municipales(fecha_0)
covid_municipal_0 = agregar_tasas_municipales(covid_municipal_0)
covid_municipal_1 = tabla_covid_indicadores_municipales(fecha_1)
covid_municipal_1 = agregar_tasas_municipales(covid_municipal_1)
caracteristicas = caracteristicas_modelos_municipios(covid_municipal_0)
# rf = ajustar_rf_municipios(covid_municipal_0, fecha_0, caracteristicas)
# caracteristicas_rf = seleccionar_caracteristicas(rf, covid_municipal_0, caracteristicas)
# resultados = calificar_municipios_letalidad(resultados, rf, caracteristicas, etiqueta='i_RF_0')
pls = ajustar_pls_letalidad(covid_municipal_0, caracteristicas)
resultados = calificar_municipios_letalidad_formato_largo(covid_municipal_0, pls, caracteristicas,
modelo='PLS', dia_ajuste=fecha_0)
pls = ajustar_pls_letalidad(covid_municipal_1, caracteristicas)
resultados_temp = calificar_municipios_letalidad_formato_largo(covid_municipal_1, pls, caracteristicas,
modelo='PLS', dia_ajuste=fecha_1)
resultados = pd.concat([resultados, resultados_temp], ignore_index=True)
# pls = ajustar_pls_letalidad(covid_municipal_0, caracteristicas_rf)
# resultados = calificar_municipios_letalidad(resultados, pls, caracteristicas, etiqueta='i_PLS_RF_1')
return resultados
# Cell
def calcular_periodo_vulnerabilidad(inicio, fin, min_defunciones=-1):
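    """Fit daily PLS and random-forest lethality models for every date between
    `inicio` and `fin`, returning the per-day model coefficients/importances and
    the long-format municipal scores."""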
inicio = pd.to_datetime(inicio, yearfirst=True)
fin = pd.to_datetime(fin, yearfirst=True)
fechas = pd.date_range(inicio, fin)
resultados = []
modelos = []
asegura_archivos_covid_disponibles(fechas)
f = IntProgress(min=0, max=len(fechas) - 1) # instantiate the bar
display(f) # display the bar
for count, fecha in enumerate(fechas):
covid_municipal = tabla_covid_indicadores_municipales(fecha.strftime("%y%m%d"))
covid_municipal = agregar_tasas_municipales(covid_municipal)
caracteristicas = caracteristicas_modelos_municipios(covid_municipal)
pls = ajustar_pls_letalidad(covid_municipal, caracteristicas, min_defunciones=min_defunciones)
df = calificar_municipios_letalidad_formato_largo(covid_municipal, pls, caracteristicas,
modelo='PLS', dia_ajuste=fecha)
resultados.append(df)
modelo = pd.DataFrame({'caracteristica': caracteristicas, 'coef': pls.coef_})
modelo['dia_ajuste'] = fecha
modelo['modelo'] = 'PLS'
modelos.append(modelo)
rf = ajustar_rf_letalidad(covid_municipal, caracteristicas, min_defunciones=min_defunciones)
df = calificar_municipios_letalidad_formato_largo(covid_municipal, rf, caracteristicas,
modelo='RF', dia_ajuste=fecha)
resultados.append(df)
modelo = pd.DataFrame({'caracteristica': caracteristicas, 'coef': rf.feature_importances_})
modelo['dia_ajuste'] = fecha
modelo['modelo'] = 'RF'
modelos.append(modelo)
f.value = count
resultados_df = pd.concat(resultados, ignore_index=True)
modelos_df = pd.concat(modelos, ignore_index=True)
return modelos_df, resultados_df
# Cell
def periodo_vulnerabilidad_con_dataframe(covid_municipal, inicio, fin, columna='tasa_covid_letal',
                                         min_casos=20, min_defunciones=-1, rf=True):
    """Compute the (PLS) vulnerability index over the whole period, using the
    column passed in as the regression target.
    :param covid_municipal: DataFrame with the data used to fit the model; the
        municipal rates must already have been added
    :type covid_municipal: pd.DataFrame
    :param inicio: start date (Y-m-d)
    :type inicio: str
    :param fin: end date (Y-m-d)
    :type fin: str
    :param columna: column to use as the regression target; the default is 'tasa_covid_letal'
    :type columna: str
    :param min_casos: minimum number of cases for a municipality to be considered
    :type min_casos: int
    :param min_defunciones: minimum number of deaths for a municipality to be considered
    :type min_defunciones: int
    :param rf: if True, also fit a Random Forest model to the data
    :type rf: bool
    :returns: a tuple ``(modelos_df, resultados_df)`` with the per-day model
        coefficients and the long-format municipal scores (column 'valor'),
        mirroring ``calcular_periodo_vulnerabilidad``
    :rtype: (pd.DataFrame, pd.DataFrame)
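
    Example (illustrative sketch; assumes ``covid_municipal`` already carries
    the municipal rates)::

        modelos_df, resultados_df = periodo_vulnerabilidad_con_dataframe(
            covid_municipal, '2020-04-01', '2020-04-30',
            columna='tasa_covid_letal', rf=False)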
"""
inicio = pd.to_datetime(inicio, yearfirst=True)
fin = pd.to_datetime(fin, yearfirst=True)
fin = min(covid_municipal.FECHA_INGRESO.max(), fin)
fechas = pd.date_range(inicio, fin)
resultados = []
modelos = []
f = IntProgress(min=0, max=len(fechas) - 1) # instantiate the bar
display(f) # display the bar
# covid_municipal = agregar_tasas_municipales(df)
caracteristicas = caracteristicas_modelos_municipios(covid_municipal)
for count, fecha in enumerate(fechas):
covid_municipal_fecha = covid_municipal.query(
f'FECHA_INGRESO == "{fecha.strftime("%Y-%m-%d")}"')
pls = ajustar_pls_columna(covid_municipal_fecha,
caracteristicas,
columna=columna,
min_casos=min_casos,
min_defunciones=min_defunciones)
df = calificar_municipios_letalidad_formato_largo(covid_municipal_fecha,
pls,
caracteristicas,
modelo='PLS', dia_ajuste=fecha)
resultados.append(df)
modelo = pd.DataFrame({'caracteristica': caracteristicas, 'coef': pls.coef_})
modelo['dia_ajuste'] = fecha
modelo['modelo'] = 'PLS'
modelos.append(modelo)
        if rf:
            # use a separate name so the `rf` flag is not overwritten by the fitted model
            rf_model = ajustar_rf_letalidad(covid_municipal_fecha,
                                            caracteristicas,
                                            min_casos=min_casos,
                                            min_defunciones=min_defunciones)
            df = calificar_municipios_letalidad_formato_largo(covid_municipal_fecha,
                                                              rf_model, caracteristicas,
                                                              modelo='RF',
                                                              dia_ajuste=fecha)
            resultados.append(df)
            modelo = pd.DataFrame({'caracteristica': caracteristicas, 'coef': rf_model.feature_importances_})
            modelo['dia_ajuste'] = fecha
            modelo['modelo'] = 'RF'
            modelos.append(modelo)
f.value = count
resultados_df = | pd.concat(resultados, ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
import math
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import validation
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
from scipy.spatial.distance import cdist
from skmultiflow.drift_detection import KSWIN
from skmultiflow.drift_detection import ADWIN
class ReactiveRobustSoftLearningVectorQuantization(ClassifierMixin, BaseEstimator):
"""Reactive Robust Soft Learning Vector Quantization
Parameters
----------
prototypes_per_class : int or list of int, optional (default=1)
Number of prototypes per class. Use list to specify different
numbers per class.
    initial_prototypes : array-like, shape = [n_prototypes, n_features + 1], optional
        Prototypes to start with. If not given, prototypes are initialized
        near the class means. The class label must be placed as the last
        entry of each prototype.
    sigma : float, optional (default=1.0)
Variance for the distribution.
random_state : int, RandomState instance or None, optional
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
    drift_detector : string, optional (default="KS")
        Type of concept drift detection. None means no concept drift
        detection. If "KS", use the Kolmogorov-Smirnov window test (KSWIN)
        [1]_. If "DIST", monitor class distances to detect outliers. If
        "ADWIN", use the ADWIN detector [2]_.
    confidence : float, optional (default=0.05)
        p-value of the Kolmogorov-Smirnov test.
    gamma : float, optional (default=0.9)
        Decay rate for Adadelta.
    replace : bool, optional (default=True)
        If True, replace the current set of prototypes when concept drift is
        detected; if False, add one prototype per class to the prototype set
        to represent the new concept.
    window_size : int, optional (default=100)
        Size of the sliding window for the KSWIN drift detector.
    stat_size : int, optional (default=30)
        Size of the statistic window for the KSWIN drift detector.
Notes
-----
    RSSLVQ (Reactive Robust Soft Learning Vector Quantization) [1]_ is a
    concept-drift stream classifier, equipped with the KSWIN drift detector
    and momentum-based gradient descent to adapt quickly to conceptual changes
    after detection. See the documentation of KSWIN in the imported file.
Attributes
----------
prototypes : array-like, shape = [n_prototypes, n_features]
        Prototype vectors, where n_prototypes is the number of prototypes and
n_features is the number of features
prototypes_classes : array-like, shape = [n_prototypes]
        Prototype classes.
class_labels : array-like, shape = [n_classes]
Array containing labels.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, Reactive
Soft Prototype Computing for Concept Drift Streams, Neurocomputing, 2020,
.. [2] <NAME>, and <NAME>. "Learning from time-changing data with adaptive windowing."
In Proceedings of the 2007 SIAM international conference on data mining, pp. 443-448.
Society for Industrial and Applied Mathematics, 2007.
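    Examples
    --------
    A minimal usage sketch (illustrative only; predictions depend on the
    random prototype initialisation):

    >>> import numpy as np
    >>> X = np.array([[0.0, 0.0], [0.1, 0.1], [1.0, 1.0], [0.9, 0.9]])
    >>> y = np.array([0, 0, 1, 1])
    >>> clf = ReactiveRobustSoftLearningVectorQuantization(random_state=42)
    >>> clf = clf.fit(X, y)
    >>> y_pred = clf.predict(np.array([[0.05, 0.05]]))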
"""
def __init__(self, prototypes_per_class=1, initial_prototypes=None,
sigma=1.0, random_state=112, drift_detector="KS", confidence=0.05,
gamma: float = 0.9, replace: bool = True, window_size=100, stat_size=30,):
self.sigma = sigma
self.random_state = random_state
self.initial_prototypes = initial_prototypes
self.prototypes_per_class = prototypes_per_class
self.initial_fit = True
self.class_labels = []
#### Reactive extensions ####
self.confidence = confidence
self.counter = 0
self.cd_detects = []
self.drift_detector = drift_detector
self.drift_detected = False
self.replace = replace
self.init_drift_detection = True
self.window_size = window_size
self.stat_size = stat_size
#### Adadelta ####
self.decay_rate = gamma
self.epsilon = 1e-8
        # guard the comparison: prototypes_per_class may also be a list of ints
        if isinstance(self.prototypes_per_class, int) and self.prototypes_per_class < 1:
            raise ValueError("Number of prototypes per class must be greater or equal to 1")
if self.drift_detector != "KS" and self.drift_detector != "DIST" and self.drift_detector != "ADWIN":
raise ValueError("Drift detector must be either KS, ADWIN or DIST!")
if self.confidence <= 0 or self.confidence >= 1:
raise ValueError("Confidence of test must be between 0 and 1!")
if self.sigma < 0:
raise ValueError("Sigma must be greater than zero")
def _optimize(self, x, y, random_state):
"""Implementation of Adadelta"""
n_data, n_dim = x.shape
nb_prototypes = self.prototypes_classes.size
prototypes = self.prototype_set.reshape(nb_prototypes, n_dim)
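        # Adadelta-style update implemented in the loop below
        # (rho = self.decay_rate, eps = self.epsilon):
        #   E[g^2]_t  = rho * E[g^2]_{t-1}  + (1 - rho) * g_t^2
        #   step_t    = sqrt((E[dx^2]_{t-1} + eps) / (E[g^2]_t + eps)) * g_t
        #   E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * step_t^2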
for i in range(n_data):
xi = x[i]
c_xi = y[i]
for j in range(prototypes.shape[0]):
d = (xi - prototypes[j])
if self.prototypes_classes[j] == c_xi:
gradient = (self._p(j, xi, prototypes=self.prototype_set, y=c_xi) -
self._p(j, xi, prototypes=self.prototype_set)) * d
else:
gradient = - self._p(j, xi, prototypes=self.prototype_set) * d
# Accumulate gradient
self.squared_mean_gradient[j] = self.decay_rate * self.squared_mean_gradient[j] + \
(1 - self.decay_rate) * gradient ** 2
# Compute update/step
step = ((self.squared_mean_step[j] + self.epsilon) / \
(self.squared_mean_gradient[j] + self.epsilon)) ** 0.5 * gradient
# Accumulate updates
self.squared_mean_step[j] = self.decay_rate * self.squared_mean_step[j] + \
(1 - self.decay_rate) * step ** 2
# Attract/Distract prototype to/from data point
self.prototype_set[j] += step
# """Implementation of Stochastical Gradient Descent"""
# n_data, n_dim = X.shape
# nb_prototypes = self.prototypes_classes.size
# prototypes = self.prototype_set.reshape(nb_prototypes, n_dim)
#
# for i in range(n_data):
# xi = X[i]
# c_xi = y[i]
# for j in range(prototypes.shape[0]):
# d = (xi - prototypes[j])
# c = 1/ self.sigma
# if self.prototypes_classes[j] == c_xi:
# # Attract prototype to data point
# self.prototype_set[j] += c * (self._p(j, xi, prototypes=self.prototype_set, y=c_xi) -
# self._p(j, xi, prototypes=self.prototype_set)) * d
# else:
# # Distance prototype from data point
# self.prototype_set[j] -= c * self._p(j, xi, prototypes=self.prototype_set) * d
def _costf(self, x, w, **kwargs):
d = (x - w)[np.newaxis].T
d = d.T.dot(d)
return -d / (2 * self.sigma)
def _p(self, j, e, y=None, prototypes=None, **kwargs):
if prototypes is None:
prototypes = self.prototype_set
if y is None:
fs = [self._costf(e, w, **kwargs) for w in prototypes]
else:
fs = [self._costf(e, prototypes[i], **kwargs) for i in
range(prototypes.shape[0]) if
self.prototypes_classes[i] == y]
fs_max = max(fs)
        s = sum([math.exp(f - fs_max) for f in fs])
        o = math.exp(
self._costf(e, prototypes[j], **kwargs) - fs_max) / s
return o
def get_prototypes(self):
"""Returns the prototypes"""
return self.prototype_set
def predict(self, x):
"""Predict class membership index for each input sample.
This function does classification on an array of
test vectors X.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
Returns predicted values.
"""
return np.array([self.prototypes_classes[np.array([self._costf(xi, p) for p in self.prototype_set]).argmax()] for xi in x])
def posterior(self, y, x):
"""
calculate the posterior for x:
p(y|x)
Parameters
----------
y: class
label
x: array-like, shape = [n_features]
sample
Returns
-------
posterior
:return: posterior
"""
check_is_fitted(self, ['prototype_set', 'prototypes_classes'])
x = validation.column_or_1d(x)
if y not in self.class_labels :
raise ValueError('y must be one of the labels\n'
'y=%s\n'
'labels=%s' % (y, self.class_labels ))
s1 = sum([self._costf(x, self.prototype_set[i]) for i in
range(self.prototype_set.shape[0]) if
self.prototypes_classes[i] == y])
s2 = sum([self._costf(x, w) for w in self.prototype_set])
return s1 / s2
def get_info(self):
return 'RSLVQ'
def predict_proba(self, X):
""" predict_proba
Predicts the probability of each sample belonging to each one of the
known target_values.
Parameters
----------
X: Numpy.ndarray of shape (n_samples, n_features)
A matrix of the samples we want to predict.
Returns
-------
numpy.ndarray
An array of shape (n_samples, n_features), in which each outer entry is
associated with the X entry of the same index. And where the list in
index [i] contains len(self.target_values) elements, each of which represents
the probability that the i-th sample of X belongs to a certain label.
"""
        raise NotImplementedError("predict_proba is not implemented for RSLVQ")
def reset(self):
self.__init__()
def _validate_train_parms(self, train_set, train_lab, classes=None):
random_state = validation.check_random_state(self.random_state)
train_set, train_lab = validation.check_X_y(train_set, train_lab.ravel())
if (self.initial_fit):
if (classes):
self.class_labels = np.asarray(classes)
self.protos_initialized = np.zeros(self.class_labels .size)
else:
self.class_labels = unique_labels(train_lab)
self.protos_initialized = np.zeros(self.class_labels .size)
nb_classes = len(self.class_labels )
nb_samples, nb_features = train_set.shape # nb_samples unused
# set prototypes per class
if isinstance(self.prototypes_per_class, int) or isinstance(self.prototypes_per_class, np.int64):
if self.prototypes_per_class < 0 or not isinstance(
self.prototypes_per_class, int) and not isinstance(
self.prototypes_per_class, np.int64):
# isinstance(self.prototypes_per_class, np.int64) fixes the singleton array array (1) is ... bug of gridsearch parallel
raise ValueError("prototypes_per_class must be a positive int")
# nb_ppc = number of protos per class
nb_ppc = np.ones([nb_classes],
dtype='int') * self.prototypes_per_class
else:
nb_ppc = validation.column_or_1d(
validation.check_array(self.prototypes_per_class,
ensure_2d=False, dtype='int'))
if nb_ppc.min() <= 0:
raise ValueError(
"values in prototypes_per_class must be positive")
if nb_ppc.size != nb_classes:
raise ValueError(
"length of prototypes per class"
" does not fit the number of classes"
"classes=%d"
"length=%d" % (nb_classes, nb_ppc.size))
# initialize prototypes
if self.initial_prototypes is None:
if self.initial_fit:
self.prototype_set = np.empty([np.sum(nb_ppc), nb_features], dtype=np.double)
self.prototypes_classes = np.empty([nb_ppc.sum()], dtype=self.class_labels .dtype)
pos = 0
for actClassIdx in range(len(self.class_labels )):
actClass = self.class_labels [actClassIdx]
nb_prot = nb_ppc[actClassIdx] # nb_ppc: prototypes per class
if (self.protos_initialized[actClassIdx] == 0 and actClass in unique_labels(train_lab)):
mean = np.mean(
train_set[train_lab == actClass, :], 0)
self.prototype_set[pos:pos + nb_prot] = mean + (
random_state.rand(nb_prot, nb_features) * 2 - 1)
if math.isnan(self.prototype_set[pos, 0]):
print('Prototype is NaN: ', actClass)
self.protos_initialized[actClassIdx] = 0
else:
self.protos_initialized[actClassIdx] = 1
self.prototypes_classes[pos:pos + nb_prot] = actClass
pos += nb_prot
else:
x = validation.check_array(self.initial_prototypes)
self.prototype_set = x[:, :-1]
self.prototypes_classes = x[:, -1]
if self.prototype_set.shape != (np.sum(nb_ppc), nb_features):
raise ValueError("the initial prototypes have wrong shape\n"
"found=(%d,%d)\n"
"expected=(%d,%d)" % (
self.prototype_set.shape[0], self.prototype_set.shape[1],
nb_ppc.sum(), nb_features))
if set(self.prototypes_classes) != set(self.class_labels ):
raise ValueError(
"prototype labels and test data classes do not match\n"
"classes={}\n"
"prototype labels={}\n".format(self.class_labels , self.prototypes_classes))
if self.initial_fit:
# Next two lines are Init for Adadelta/RMSprop
self.squared_mean_gradient = np.zeros_like(self.prototype_set)
self.squared_mean_step = np.zeros_like(self.prototype_set)
self.initial_fit = False
return train_set, train_lab, random_state
def fit(self, X, y, classes=None):
"""Fit the LVQ model to the given training data and parameters using
l-bfgs-b.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
Returns
--------
self
"""
X, y, random_state = self._validate_train_parms(X, y, classes=classes)
if len(np.unique(y)) == 1:
raise ValueError("fitting " + type(
self).__name__ + " with only one class is not possible")
# X = preprocessing.scale(X)
self._optimize(X, y, random_state)
return self
def partial_fit(self, X, y, classes=None):
"""Fit the LVQ model to the given training data and parameters using
l-bfgs-b.
Parameters
----------
x : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
Returns
--------
self
"""
if set(unique_labels(y)).issubset(set(self.class_labels )) or self.initial_fit == True:
X, y, random_state = self._validate_train_parms(
X, y, classes=classes)
else:
raise ValueError(
'Class {} was not learned - please declare all classes in first call of fit/partial_fit'.format(y))
self.counter = self.counter + 1
if self.drift_detector is not None and self.concept_drift_detection(X, y):
self.cd_handling(X, y)
self.cd_detects.append(self.counter)
# X = preprocessing.scale(X)
self._optimize(X, y, self.random_state)
return self
def save_data(self, X, y, random_state):
pd.DataFrame(self.prototype_set).to_csv("Prototypes.csv")
        pd.DataFrame(self.prototypes_classes).to_csv("Labels.csv")  # assumed output filename
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
        tup1 = (per.year, per.month, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
        # frequency conversion tests: from Business Frequency
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="A", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = Period(freq="H", year=2007, month=1, day=1, hour=23)
ival_B_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_B_to_T_end = Period(
freq="Min", year=2007, month=1, day=1, hour=23, minute=59
)
ival_B_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_B_to_S_end = Period(
freq="S", year=2007, month=1, day=1, hour=23, minute=59, second=59
)
assert ival_B.asfreq("A") == ival_B_to_A
assert ival_B_end_of_year.asfreq("A") == ival_B_to_A
assert ival_B.asfreq("Q") == ival_B_to_Q
assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q
assert ival_B.asfreq("M") == ival_B_to_M
assert ival_B_end_of_month.asfreq("M") == ival_B_to_M
assert ival_B.asfreq("W") == ival_B_to_W
assert ival_B_end_of_week.asfreq("W") == ival_B_to_W
assert ival_B.asfreq("D") == ival_B_to_D
assert ival_B.asfreq("H", "S") == ival_B_to_H_start
assert ival_B.asfreq("H", "E") == ival_B_to_H_end
assert ival_B.asfreq("Min", "S") == ival_B_to_T_start
assert ival_B.asfreq("Min", "E") == ival_B_to_T_end
assert ival_B.asfreq("S", "S") == ival_B_to_S_start
assert ival_B.asfreq("S", "E") == ival_B_to_S_end
assert ival_B.asfreq("B") == ival_B
def test_conv_daily(self):
        # frequency conversion tests: from Daily Frequency
ival_D = | Period(freq="D", year=2007, month=1, day=1) | pandas.Period |
import logging
import pandas as pd
"""
As a precondition for labelling a user's calls as spam, call_count must be
at least 5; if that condition holds together with either of the conditions
below, the user should be labelled as spam:
1. fewer than 50% of call_count were answered;
2. less than 5 seconds were spoken per answered call.
(A small demo block is sketched at the end of this file.)
"""
def label_data(call_data: list) -> pd.DataFrame:
logging.info("Labelling data...")
for data in call_data:
if data['call_count'] > 4:
if data['answered_call'] < (data['call_count'] / 2):
data['is_spam'] = True
elif data['answered_call'] > 0 and (data['total_duration (second)'] / data['answered_call']) < 5:
print(data['user_id'], " ", data['total_duration (second)'] / data['answered_call'])
data['is_spam'] = True
else:
data['is_spam'] = False
else:
data['is_spam'] = False
return | pd.DataFrame(call_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 10:28:49 2018
@author: dani
Make tSNE & PCA plots for each combination of 2 channels from same movie
Need to ask <NAME> what moving threshold is for 'time_moving01' etc. parameters
and whether/how I can change that if needed
this should be somewhere in the individual python files, but not necessarily easy to find
"""
from __future__ import division
import os
import pandas as pd
#import numpy as np
from matplotlib import pyplot as plt
import matplotlib
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import datetime
import seaborn as sns
#import skimage.io as io
#from scipy.stats import mannwhitneyu, mstats
#import math
#import csv
indir = "./TrackMate_Analysis/XY_data_60/HM_output-move_thresh_1/"
outdir = indir+'HM_Plots_pairwise_ALL/'
# set speed threshold for analysis (in px/frame)
#threshold = 1 # currently unused
# I will probably replace this by only using tracks that have a time_moving01>0
colors=['b','darkorange']
# not sure what next 2 parameters do exactly, sth to do with formatting though
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# set which parameters to use.
# Individual lines can be commented out
col = [
'Well/XY',
# 'cell_id',
#
'total_distance', 'net_distance', 'linearity', 'spearmanrsq', 'progressivity',
'max_speed', 'min_speed', 'avg_speed',
'MSD_slope', 'hurst_RS', 'nongauss', 'disp_var', 'disp_skew',
'rw_linearity', 'rw_netdist',
'rw_kurtosis01', 'rw_kurtosis02', 'rw_kurtosis03', 'rw_kurtosis04', 'rw_kurtosis05',
'avg_moving_speed01', 'avg_moving_speed02', 'avg_moving_speed03', 'avg_moving_speed04', 'avg_moving_speed05',
'time_moving01', 'time_moving02', 'time_moving03', 'time_moving04', 'time_moving05',
'autocorr_1', 'autocorr_2', 'autocorr_3', 'autocorr_4', 'autocorr_5',
'p_rturn_9_5', 'p_rturn_10_5', 'p_rturn_11_5',
'mean_theta_9_5', 'min_theta_9_5', 'max_theta_9_5',
'mean_theta_10_5', 'min_theta_10_5', 'max_theta_10_5',
'mean_theta_11_5', 'min_theta_11_5', 'max_theta_11_5',
'rw_kurtosis06', 'rw_kurtosis07', 'rw_kurtosis08', 'rw_kurtosis09', 'rw_kurtosis10',
'avg_moving_speed06', 'avg_moving_speed07', 'avg_moving_speed08', 'avg_moving_speed09', 'avg_moving_speed10',
'time_moving06', 'time_moving07', 'time_moving08', 'time_moving09', 'time_moving10',
'autocorr_6', 'autocorr_7', 'autocorr_8', 'autocorr_9', 'autocorr_10',
'p_rturn_9_6', 'p_rturn_10_6', 'p_rturn_11_6',
'mean_theta_9_6', 'min_theta_9_6', 'max_theta_9_6',
'mean_theta_10_6', 'min_theta_10_6', 'max_theta_10_6',
'mean_theta_11_6', 'min_theta_11_6', 'max_theta_11_6',
#
# 'Test_coulmn','Test_coulmn2',
]
outdir = outdir.replace('pairwise','pairwise_ALL')
if not os.path.exists(outdir):
os.makedirs(outdir)
#file_list = [f for f in os.listdir(indir) if f.endswith('Channel1_SpotsStats.csv')]
#file_list = [f for f in os.listdir(indir) if f.endswith('Channel1_SpotsStats.csv') and f.startswith('HMout_Stitch_180802_2Dmig_BSA_CTVi+CTFR_6um_006.tif---')]
#file_list = [f for f in os.listdir(indir) if f.endswith('Channel1_SpotsStats.csv') and f.startswith('HMout_00')]
file_list = [f for f in os.listdir(indir) if f.endswith('Channel1_SpotsStats.csv') and f.startswith('HMout_Stitch_180822_1_BSA_CTVi+CTYw_001')]
# I could rework this to represent dye used or experimental condition
labels = ['Channel1','Channel2']
counter = 0
start = str(datetime.datetime.now().time())[:8]
N=['','']
for file in file_list:
counter +=1
Ch1 = file
Ch2 = file.replace('Channel1','Channel2')
# create figure title (for plot output)
figtitle = file.replace('Channel1_SpotsStats.csv','')
figtitle = figtitle.replace('HMout_','')
figtitle = figtitle.replace('Stitch_','')
figtitle = figtitle.replace('.tif','')
figtitle = figtitle.replace('---','')
pos = str(counter).zfill(2)+r'/'+str(len(file_list))
print (pos,r'-',figtitle)
if os.path.exists(indir+Ch2):
### LOAD AND ORGANIZE DATA
dfCh1 = pd.read_csv(indir+Ch1,usecols = col)
dfCh2 = | pd.read_csv(indir+Ch2,usecols = col) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Pre-processing for party A: re-indexes case ids, converts timestamps to
relative seconds, one-hot encodes events, splits the events between parties
A and B, and pads the traces to equal length.
"""
import sys
import os
import pandas as pd
import time
from datetime import datetime
def to_list(s):
return list(s)
def generate_rows(s):
return s*[[s,0,0]]
#input_file = sys.argv[1]
#output_file = sys.argv[2]
# input_dir=r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets"
# output_dir=r"C:\Gamal Elkoumy\PhD\OneDrive - Tartu Ülikool\Secure MPC\Business Process Mining SourceCode\Datasets"
#
# file_name= "CCC19_3_columns"
# preprocessing
def preprocessing_partyA(input_dir,output_dir,file_name):
input_file= os.path.join(input_dir,file_name+".csv")
data = pd.read_csv(input_file)
new_case_ids = pd.Index(data['case'].unique())
data['case'] = data['case'].apply(lambda x: new_case_ids.get_loc(x))
#df['event'] = pd.util.hash_pandas_object(df['event'],index=False)
""" generating relative time"""
try:
data['completeTime'] = data['completeTime'].apply(lambda x: int(time.mktime(datetime.strptime(x,"%Y-%m-%d %H:%M:%S").timetuple())))
except:
data['completeTime'] = data['completeTime'].apply(lambda x: int(time.mktime(datetime.strptime(x,"%Y-%m-%d %H:%M:%S%z").timetuple())))
data.completeTime=data.completeTime-min(data.completeTime)
""" Generating binary representation of the events """
#moving event to the last column
data=data[['case','completeTime','event']]
unique_events = list(data.event.unique())
#
ini_binary = "0"*(len(unique_events)-1)+"1"
event_idx= {}
for event in unique_events:
event_idx[event]= ini_binary
ini_binary= ini_binary[1:]+"0"
bits_column_names=["b"+str(i) for i in range(0,len(unique_events))]
data.event=data.event.apply(lambda x: event_idx[x])
temp= data.event.apply(to_list)
temp= pd.DataFrame.from_dict(dict(zip(temp.index, temp.values))).T
data[bits_column_names]=temp
""" splitting the file over partyA and partyB """
party_A=pd.DataFrame()
party_B=pd.DataFrame()
bits_size= len(bits_column_names)
events_in_b=[]
for i in range (bits_size//2):
s=""
for j in range(bits_size):
if j==i:
s+="1"
else:
s+="0"
events_in_b.append(s)
party_A=data[~data.event.isin(events_in_b)]
party_B=data[data.event.isin(events_in_b)]
""" performing padding """
''' party A '''
counts = party_A.groupby("case").count().event
max_count= counts.max()
need_increase=counts[counts<max_count]
difference=max_count-need_increase
padded_value=[]
if len(difference)!=0:
for i in difference.index:
temp= difference[i] *[[i,0,0]]
padded_value=padded_value+temp
padded_value= | pd.DataFrame.from_records(padded_value) | pandas.DataFrame.from_records |
import tempfile
from . import common
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def taxa_cols(df):
"""Returns metadata columns from DataFrame object."""
cols = []
for col in df.columns:
if 'Unassigned' in col:
cols.append(col)
elif '__' in col:
cols.append(col)
else:
continue
return cols
def _get_mf_cols(df):
"""Returns metadata columns from DataFrame object."""
cols = []
for column in df.columns:
if 'Unassigned' in column:
continue
elif '__' in column:
continue
else:
cols.append(column)
return cols
def _filter_samples(df, mf, exclude_samples, include_samples):
"""Returns DataFrame objects after sample filtering."""
if exclude_samples and include_samples:
m = ("Cannot use 'exclude_samples' and "
"'include_samples' arguments together")
raise ValueError(m)
elif exclude_samples:
for x in exclude_samples:
for y in exclude_samples[x]:
i = mf[x] != y
df = df.loc[i]
mf = mf.loc[i]
elif include_samples:
for x in include_samples:
i = mf[x].isin(include_samples[x])
df = df.loc[i]
mf = mf.loc[i]
else:
pass
return (df, mf)
def _sort_by_mean(df):
"""Returns DataFrame object after sorting taxa by mean relative abundance."""
a = df.div(df.sum(axis=1), axis=0)
a = a.loc[:, a.mean().sort_values(ascending=False).index]
return df[a.columns]
def _get_others_col(df, count, taxa_names, show_others):
"""Returns DataFrame object after selecting taxa."""
    if count != 0 and taxa_names is not None:
        m = "Cannot use 'count' and 'taxa_names' arguments together"
        raise ValueError(m)
    elif count != 0:
if count < df.shape[1]:
others = df.iloc[:, count-1:].sum(axis=1)
df = df.iloc[:, :count-1]
if show_others:
df = df.assign(Others=others)
else:
pass
elif taxa_names is not None:
others = df.drop(columns=taxa_names).sum(axis=1)
df = df[taxa_names]
if show_others:
df = df.assign(Others=others)
else:
pass
return df
def taxa_abundance_bar_plot(
visualization, metadata=None, level=1, group=None, group_order=None, by=None,
ax=None, figsize=None, width=0.8, count=0, exclude_samples=None,
include_samples=None, exclude_taxa=None, sort_by_names=False,
colors=None, label_columns=None, orders=None, sample_names=None,
csv_file=None, taxa_names=None, sort_by_mean1=True,
sort_by_mean2=True, sort_by_mean3=True, show_others=True,
cmap_name='Accent', legend_short=False, pname_kws=None, legend=True
):
"""
Create a bar plot showing relative taxa abundance for individual samples.
The input visualization may already contain sample metadata. To provide
new sample metadata, and ignore the existing one, use the ``metadata``
option.
By default, the method will draw a bar for each sample. To plot the
average taxa abundance of each sample group, use the ``group`` option.
+----------------+-----------------------------------------------------+
| q2-taxa plugin | Example |
+================+=====================================================+
| QIIME 2 CLI | qiime taxa barplot [OPTIONS] |
+----------------+-----------------------------------------------------+
| QIIME 2 API | from qiime2.plugins.taxa.visualizers import barplot |
+----------------+-----------------------------------------------------+
Parameters
----------
visualization : str, qiime2.Visualization, pandas.DataFrame
Visualization file or object from the q2-taxa plugin. Alternatively,
a :class:`pandas.DataFrame` object.
metadata : str or qiime2.Metadata, optional
Metadata file or object.
level : int, default: 1
Taxonomic level at which the features should be collapsed.
group : str, optional
Metadata column to be used for grouping the samples.
group_order : list, optional
Order to plot the groups in.
by : list, optional
Column name(s) to be used for sorting the samples. Using 'sample-id'
will sort the samples by their name, in addition to other column
name(s) that may have been provided. If multiple items are provided,
sorting will occur by the order of the items.
ax : matplotlib.axes.Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.
figsize : tuple, optional
Width, height in inches. Format: (float, float).
width : float, default: 0.8
The width of the bars.
count : int, default: 0
The number of taxa to display. When 0, display all.
exclude_samples : dict, optional
Filtering logic used for sample exclusion.
Format: {'col': ['item', ...], ...}.
include_samples : dict, optional
Filtering logic used for sample inclusion.
Format: {'col': ['item', ...], ...}.
exclude_taxa : list, optional
        The taxa names to be excluded when matched. Case insensitive.
sort_by_names : bool, default: False
If true, sort the columns (i.e. species) to be displayed by name.
colors : list, optional
The bar colors.
label_columns : list, optional
List of metadata columns to be concatenated to form new sample
labels. Use the string 'sample-id' to indicate the sample ID column.
orders : dict, optional
Dictionary of {column1: [element1, element2, ...], column2:
[element1, element2...], ...} to indicate the order of items. Used to
sort the sampels by the user-specified order instead of ordering
numerically or alphabetically.
sample_names : list, optional
List of sample IDs to be included.
csv_file : str, optional
Path of the .csv file to output the dataframe to.
taxa_names : list, optional
List of taxa names to be displayed.
sort_by_mean1 : bool, default: True
Sort taxa by their mean relative abundance before sample filtration.
sort_by_mean2 : bool, default: True
Sort taxa by their mean relative abundance after sample filtration by
'include_samples' or 'exclude_samples'.
sort_by_mean3 : bool, default: True
Sort taxa by their mean relative abundance after sample filtration by
'sample_names'.
show_others : bool, default: True
Include the 'Others' category.
cmap_name : str, default: 'Accent'
Name of the colormap passed to `matplotlib.cm.get_cmap()`.
legend_short : bool, default: False
If true, only display the smallest taxa rank in the legend.
pname_kws : dict, optional
Keyword arguments for :meth:`dokdo.api.pname` when ``legend_short``
is True.
legend : bool, default: True
Whether to plot the legend.
Returns
-------
matplotlib.axes.Axes
Axes object with the plot drawn onto it.
See Also
--------
dokdo.api.taxa_abundance_box_plot
Examples
--------
Below is a simple example showing taxonomic abundance at the kingdom
level (i.e. ``level=1``), which is the default taxonomic rank.
.. code:: python3
import dokdo
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
qzv_file = '/Users/sbslee/Desktop/dokdo/data/moving-pictures-tutorial/taxa-bar-plots.qzv'
dokdo.taxa_abundance_bar_plot(
qzv_file,
figsize=(10, 7)
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-1.png
We can change the taxonomic rank from kingdom to genus by setting
``level=6``. Note that we are using ``legend=False`` because
otherwise there will be too many taxa to display on the legend.
Note also that the colors are recycled in each bar.
.. code:: python3
dokdo.taxa_abundance_bar_plot(
qzv_file,
figsize=(10, 7),
level=6,
legend=False
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-2.png
We can only show the top seven most abundant genera plus 'Others' with
``count=8``.
.. code:: python3
dokdo.taxa_abundance_bar_plot(
qzv_file,
figsize=(10, 7),
level=6,
count=8,
legend_short=True
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-3.png
We can plot the figure and the legend separately.
.. code:: python3
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(10, 7), gridspec_kw={'width_ratios': [9, 1]})
dokdo.taxa_abundance_bar_plot(
qzv_file,
ax=ax1,
level=6,
count=8,
legend=False
)
dokdo.taxa_abundance_bar_plot(
qzv_file,
ax=ax2,
level=6,
count=8,
legend_short=True
)
handles, labels = ax2.get_legend_handles_labels()
ax2.clear()
ax2.legend(handles, labels)
ax2.axis('off')
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-4.png
We can use a different color map to display more unique genera (e.g. 20).
.. code:: python3
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(10, 7), gridspec_kw={'width_ratios': [9, 1]})
dokdo.taxa_abundance_bar_plot(
qzv_file,
ax=ax1,
level=6,
count=20,
cmap_name='tab20',
legend=False
)
dokdo.taxa_abundance_bar_plot(
qzv_file,
ax=ax2,
level=6,
count=20,
cmap_name='tab20',
legend_short=True
)
handles, labels = ax2.get_legend_handles_labels()
ax2.clear()
ax2.legend(handles, labels)
ax2.axis('off')
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-5.png
We can sort the samples by the body-site column in metadata with
``by=['body-site']``. To check whether the sorting worked properly,
we can change the x-axis tick labels to include each sample's
body-site with ``label_columns``.
.. code:: python3
dokdo.taxa_abundance_bar_plot(
qzv_file,
by=['body-site'],
label_columns=['body-site', 'sample-id'],
figsize=(10, 7),
level=6,
count=8,
legend_short=True
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-6.png
If you want to sort the samples in a certain order instead of ordering
numerically or alphabetically, use the ``orders`` option.
.. code:: python3
dokdo.taxa_abundance_bar_plot(
qzv_file,
by=['body-site'],
label_columns=['body-site', 'sample-id'],
figsize=(10, 7),
level=6,
count=8,
orders={'body-site': ['left palm', 'tongue', 'gut', 'right palm']},
legend_short=True
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-7.png
We can only display the 'gut' and 'tongue' samples with
``include_samples``.
.. code:: python3
fig, [ax1, ax2] = plt.subplots(1, 2, figsize=(9, 7), gridspec_kw={'width_ratios': [9, 1]})
kwargs = dict(
include_samples={'body-site': ['gut', 'tongue']},
by=['body-site'],
label_columns=['body-site', 'sample-id'],
level=6,
count=8
)
dokdo.taxa_abundance_bar_plot(
qzv_file,
ax=ax1,
legend=False,
**kwargs
)
dokdo.taxa_abundance_bar_plot(
qzv_file,
ax=ax2,
legend_short=True,
**kwargs
)
handles, labels = ax2.get_legend_handles_labels()
ax2.clear()
ax2.legend(handles, labels)
ax2.axis('off')
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-8.png
We can make multiple bar charts grouped by body-site. When making a
grouped bar chart, it's important to include ``sort_by_mean2=False``
in order to have the same bar colors for the same taxa across different
groups.
.. code:: python3
fig, axes = plt.subplots(1, 5, figsize=(16, 7))
groups = ['gut', 'left palm', 'right palm', 'tongue']
kwargs = dict(level=6, count=8, sort_by_mean2=False, legend=False)
for i, group in enumerate(groups):
dokdo.taxa_abundance_bar_plot(
qzv_file,
ax=axes[i],
include_samples={'body-site': [group]},
**kwargs
)
if i != 0:
axes[i].set_ylabel('')
axes[i].set_yticks([])
axes[i].set_title(group)
dokdo.taxa_abundance_bar_plot(
qzv_file,
ax=axes[4],
legend_short=True,
**kwargs
)
handles, labels = axes[4].get_legend_handles_labels()
axes[4].clear()
axes[4].legend(handles, labels, loc='center left')
axes[4].axis('off')
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-9.png
We can select specific samples with ``sample_names``.
.. code:: python3
dokdo.taxa_abundance_bar_plot(
qzv_file,
figsize=(10, 7),
level=6,
count=8,
sample_names=['L2S382', 'L4S112', 'L1S281'],
legend_short=True
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-10.png
We can also pick specific colors for the bars.
.. code:: python3
dokdo.taxa_abundance_bar_plot(
qzv_file,
figsize=(10, 7),
level=6,
count=8,
sample_names=['L2S382', 'L4S112', 'L1S281'],
colors=['tab:blue', 'tab:orange', 'tab:gray'],
legend_short=True
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-11.png
We can create a bar for each sample type.
.. code:: python3
dokdo.taxa_abundance_bar_plot(
qzv_file,
level=6,
count=8,
group='body-site',
figsize=(10, 7),
legend_short=True
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-12.png
    Of course, we can specify which groups to plot.
.. code:: python3
dokdo.taxa_abundance_bar_plot(
qzv_file,
level=6,
count=8,
group='body-site',
group_order=['tongue', 'left palm'],
figsize=(10, 7),
legend_short=True
)
plt.tight_layout()
.. image:: images/taxa_abundance_bar_plot-13.png
"""
if isinstance(visualization, pd.DataFrame):
df = visualization
else:
with tempfile.TemporaryDirectory() as t:
common.export(visualization, t)
df = pd.read_csv(f'{t}/level-{level}.csv', index_col=0)
if sort_by_mean1:
cols = _get_mf_cols(df)
mf = df[cols]
df = df.drop(columns=cols)
df = _sort_by_mean(df)
df = pd.concat([df, mf], axis=1, join='inner')
    # If provided, update the metadata.
    if metadata is not None:
        mf = common.get_mf(metadata)
        cols = _get_mf_cols(df)
        df.drop(columns=cols, inplace=True)
        df = pd.concat([df, mf], axis=1, join='inner')
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas.types.common import is_integer, is_scalar
from pandas import Index, Series, DataFrame, isnull, date_range
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.index import Timestamp
from pandas.tseries.offsets import BDay
from pandas.tseries.tdi import Timedelta
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result, expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result, expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
self.assertEqual(result, 'Missing')
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
self.assertEqual(result, 3)
result = vc.get(True, default='Missing')
self.assertEqual(result, 'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - BDay()
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iloc[2], 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
# ts.ix[mask_shifted]
# ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(is_scalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
self.assertEqual(result, s.loc['A'])
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[
-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
self.assertEqual(result, expected)
result = s.iloc[0]
self.assertEqual(result, expected)
result = s['a']
self.assertEqual(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
self.assertEqual(result, expected)
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
self.assertEqual(result, expected)
s2 = s.copy()
s2['a'] = expected
result = s2['a']
self.assertEqual(result, expected)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
self.assertEqual(orig.dtype, 'datetime64[ns, {0}]'.format(tz))
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-11-06 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
self.assertEqual(vals.dtype, 'datetime64[ns, {0}]'.format(tz))
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                    # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
                    # Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, ['X', 'Y', 'Z'])
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
self.assertFalse(is_integer(w[0]))
self.assertTrue(is_integer(w[1]))
self.assertTrue(is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
        # similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
        # gets coerced to float
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
        tm.assert_series_equal(s, expected)
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 24 22:35:51 2021
Function to check for missing data in a DataFrame.
Input parameter: a pandas DataFrame.
Output: count and percentage of missing values per column.
@author: Ashish
"""
# import required libraries
import re, os, emoji, numpy as np
import pandas as pd
# Count vectorizer for N-grams
from sklearn.feature_extraction.text import CountVectorizer,TfidfVectorizer
# NLTK for tokenization and stopwords
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
def find_missing_data_vals(data):
    total = data.isnull().sum().sort_values(ascending=False)
    percentage = round(total / data.shape[0] * 100, 2)
    return pd.concat([total, percentage], axis=1, keys=['Total', 'Percentage'])
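# Minimal usage sketch (hypothetical data; the DataFrame below is illustrative
# only and not part of the original script).
if __name__ == "__main__":
    demo = pd.DataFrame({"age": [25, np.nan, 31],
                         "city": ["NY", "LA", None]})
    # Each column has one missing value out of three rows, i.e. 33.33%.
    print(find_missing_data_vals(demo))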
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def label_rectify(data):
    # Shift the labels in place: every entry between the first and the last
    # ten is offset by 5, and the last ten entries are offset by 1.
    data[1:-10] += 5
    data[-10:] += 1
    return data
def data_preparation(file):
    epoch = []
    top1 = []
    top5 = []
    loss = []
    # Skip the four header lines at the top of the input file.
    delete_title = list(range(4))
    for i in range(4):
        delete_title[i] = file.readline()
    # Each of the next 39 lines contains whitespace-separated fields; the
    # epoch, top-1 accuracy, top-5 accuracy and loss sit at fixed positions.
    for i in range(39):
        line = file.readline()
        words = line.split()
        epoch.append(int(words[7]))
        top1.append(float(words[11]))
        top5.append(float(words[15]))
        loss.append(float(words[19]))
    data = [epoch, top1, top5, loss]
    data = np.array(data)
    # Adjust the epoch labels in place (first row of the array).
    label_rectify(data[0])
    data = pd.DataFrame(data=data, index=['epoch', 'top1', 'top5', 'loss'])
from __future__ import annotations
from datetime import (
datetime,
time,
timedelta,
tzinfo,
)
from typing import (
TYPE_CHECKING,
Literal,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
tslib,
)
from pandas._libs.arrays import NDArrayBacked
from pandas._libs.tslibs import (
BaseOffset,
NaT,
NaTType,
Resolution,
Timestamp,
conversion,
fields,
get_resolution,
iNaT,
ints_to_pydatetime,
is_date_array_normalized,
normalize_i8_timestamps,
timezones,
to_offset,
tzconversion,
)
from pandas._typing import npt
from pandas.errors import PerformanceWarning
from pandas.util._validators import validate_inclusive
from pandas.core.dtypes.cast import astype_dt64_to_dt64tz
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
INT64_DTYPE,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_object_dtype,
is_period_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.arrays import (
ExtensionArray,
datetimelike as dtl,
)
from pandas.core.arrays._ranges import generate_regular_range
from pandas.core.arrays.integer import IntegerArray
import pandas.core.common as com
from pandas.core.construction import extract_array
from pandas.tseries.frequencies import get_period_alias
from pandas.tseries.offsets import (
BDay,
Day,
Tick,
)
if TYPE_CHECKING:
from pandas import DataFrame
from pandas.core.arrays import (
PeriodArray,
TimedeltaArray,
)
_midnight = time(0, 0)
def tz_to_dtype(tz):
"""
Return a datetime64[ns] dtype appropriate for the given timezone.
Parameters
----------
tz : tzinfo or None
Returns
-------
    np.dtype or DatetimeTZDtype
"""
if tz is None:
return DT64NS_DTYPE
else:
return DatetimeTZDtype(tz=tz)
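# Illustrative behaviour of tz_to_dtype (a sketch, not part of the library
# source; assumes a tz object such as pytz.UTC is available):
#
#     tz_to_dtype(None)       -> np.dtype('<M8[ns]')  (i.e. DT64NS_DTYPE)
#     tz_to_dtype(pytz.UTC)   -> DatetimeTZDtype('ns', 'UTC'), shown as datetime64[ns, UTC]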
def _field_accessor(name: str, field: str, docstring=None):
def f(self):
values = self._local_timestamps()
if field in self._bool_ops:
result: np.ndarray
if field.endswith(("start", "end")):
freq = self.freq
month_kw = 12
if freq:
kwds = freq.kwds
month_kw = kwds.get("startingMonth", kwds.get("month", 12))
result = fields.get_start_end_field(
values, field, self.freqstr, month_kw
)
else:
result = fields.get_date_field(values, field)
# these return a boolean by-definition
return result
if field in self._object_ops:
result = fields.get_date_name_field(values, field)
result = self._maybe_mask_results(result, fill_value=None)
else:
result = fields.get_date_field(values, field)
result = self._maybe_mask_results(
result, fill_value=None, convert="float64"
)
return result
f.__name__ = name
f.__doc__ = docstring
return property(f)
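# Usage sketch for the factory above (an assumption: this mirrors how
# DatetimeArray defines its field properties further down, which is not shown
# in this excerpt):
#
#     year = _field_accessor("year", "Y", "The year of the datetime.")
#     month = _field_accessor("month", "M", "The month as January=1, December=12.")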
class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps):
"""
Pandas ExtensionArray for tz-naive or tz-aware datetime data.
.. warning::
DatetimeArray is currently experimental, and its API may change
without warning. In particular, :attr:`DatetimeArray.dtype` is
expected to change to always be an instance of an ``ExtensionDtype``
subclass.
Parameters
----------
values : Series, Index, DatetimeArray, ndarray
The datetime data.
For DatetimeArray `values` (or a Series or Index boxing one),
`dtype` and `freq` will be extracted from `values`.
dtype : numpy.dtype or DatetimeTZDtype
Note that the only NumPy dtype allowed is 'datetime64[ns]'.
freq : str or Offset, optional
The frequency.
copy : bool, default False
Whether to copy the underlying array of values.
Attributes
----------
None
Methods
-------
None
"""
_typ = "datetimearray"
_scalar_type = Timestamp
_recognized_scalars = (datetime, np.datetime64)
_is_recognized_dtype = is_datetime64_any_dtype
_infer_matches = ("datetime", "datetime64", "date")
# define my properties & methods for delegation
_bool_ops: list[str] = [
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"is_leap_year",
]
_object_ops: list[str] = ["freq", "tz"]
_field_ops: list[str] = [
"year",
"month",
"day",
"hour",
"minute",
"second",
"weekofyear",
"week",
"weekday",
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"daysinmonth",
"microsecond",
"nanosecond",
]
_other_ops: list[str] = ["date", "time", "timetz"]
_datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods: list[str] = [
"to_period",
"tz_localize",
"tz_convert",
"normalize",
"strftime",
"round",
"floor",
"ceil",
"month_name",
"day_name",
]
# ndim is inherited from ExtensionArray, must exist to ensure
# Timestamp.__richcmp__(DateTimeArray) operates pointwise
# ensure that operations with numpy arrays defer to our implementation
__array_priority__ = 1000
# -----------------------------------------------------------------
# Constructors
_dtype: np.dtype | DatetimeTZDtype
_freq = None
def __init__(self, values, dtype=DT64NS_DTYPE, freq=None, copy: bool = False):
values = extract_array(values, extract_numpy=True)
if isinstance(values, IntegerArray):
values = values.to_numpy("int64", na_value=iNaT)
inferred_freq = getattr(values, "_freq", None)
if isinstance(values, type(self)):
# validation
dtz = getattr(dtype, "tz", None)
if dtz and values.tz is None:
dtype = DatetimeTZDtype(tz=dtype.tz)
elif dtz and values.tz:
if not timezones.tz_compare(dtz, values.tz):
msg = (
"Timezone of the array and 'dtype' do not match. "
f"'{dtz}' != '{values.tz}'"
)
raise TypeError(msg)
elif values.tz:
dtype = values.dtype
if freq is None:
freq = values.freq
values = values._ndarray
if not isinstance(values, np.ndarray):
raise ValueError(
f"Unexpected type '{type(values).__name__}'. 'values' must be "
"a DatetimeArray, ndarray, or Series or Index containing one of those."
)
if values.ndim not in [1, 2]:
raise ValueError("Only 1-dimensional input arrays are supported.")
if values.dtype == "i8":
# for compat with datetime/timedelta/period shared methods,
# we can sometimes get here with int64 values. These represent
# nanosecond UTC (or tz-naive) unix timestamps
values = values.view(DT64NS_DTYPE)
if values.dtype != DT64NS_DTYPE:
raise ValueError(
"The dtype of 'values' is incorrect. Must be 'datetime64[ns]'. "
f"Got {values.dtype} instead."
)
dtype = _validate_dt64_dtype(dtype)
if freq == "infer":
raise ValueError(
"Frequency inference not allowed in DatetimeArray.__init__. "
"Use 'pd.array()' instead."
)
if copy:
values = values.copy()
if freq:
freq = to_offset(freq)
if getattr(dtype, "tz", None):
# https://github.com/pandas-dev/pandas/issues/18595
# Ensure that we have a standard timezone for pytz objects.
# Without this, things like adding an array of timedeltas and
# a tz-aware Timestamp (with a tz specific to its datetime) will
# be incorrect(ish?) for the array as a whole
dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz))
NDArrayBacked.__init__(self, values=values, dtype=dtype)
self._freq = freq
if inferred_freq is None and freq is not None:
type(self)._validate_frequency(self, freq)
# error: Signature of "_simple_new" incompatible with supertype "NDArrayBacked"
@classmethod
def _simple_new( # type: ignore[override]
cls, values: np.ndarray, freq: BaseOffset | None = None, dtype=DT64NS_DTYPE
) -> DatetimeArray:
assert isinstance(values, np.ndarray)
assert values.dtype == DT64NS_DTYPE
result = super()._simple_new(values, dtype)
result._freq = freq
return result
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False):
return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy)
@classmethod
def _from_sequence_not_strict(
cls,
data,
dtype=None,
copy: bool = False,
tz=None,
freq=lib.no_default,
dayfirst: bool = False,
yearfirst: bool = False,
ambiguous="raise",
):
explicit_none = freq is None
freq = freq if freq is not lib.no_default else None
freq, freq_infer = dtl.maybe_infer_freq(freq)
subarr, tz, inferred_freq = sequence_to_dt64ns(
data,
dtype=dtype,
copy=copy,
tz=tz,
dayfirst=dayfirst,
yearfirst=yearfirst,
ambiguous=ambiguous,
)
freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, freq_infer)
if explicit_none:
freq = None
dtype = tz_to_dtype(tz)
result = cls._simple_new(subarr, freq=freq, dtype=dtype)
if inferred_freq is None and freq is not None:
# this condition precludes `freq_infer`
cls._validate_frequency(result, freq, ambiguous=ambiguous)
elif freq_infer:
# Set _freq directly to bypass duplicative _validate_frequency
# check.
result._freq = to_offset(result.inferred_freq)
return result
@classmethod
def _generate_range(
cls,
start,
end,
periods,
freq,
tz=None,
normalize=False,
ambiguous="raise",
nonexistent="raise",
inclusive="both",
):
periods = dtl.validate_periods(periods)
if freq is None and any(x is None for x in [periods, start, end]):
raise ValueError("Must provide freq argument if no data is supplied")
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, "
"and freq, exactly three must be specified"
)
freq = to_offset(freq)
if start is not None:
start = Timestamp(start)
if end is not None:
end = Timestamp(end)
if start is NaT or end is NaT:
raise ValueError("Neither `start` nor `end` can be NaT")
left_inclusive, right_inclusive = validate_inclusive(inclusive)
start, end, _normalized = _maybe_normalize_endpoints(start, end, normalize)
tz = _infer_tz_from_endpoints(start, end, tz)
if tz is not None:
# Localize the start and end arguments
start_tz = None if start is None else start.tz
end_tz = None if end is None else end.tz
start = _maybe_localize_point(
start, start_tz, start, freq, tz, ambiguous, nonexistent
)
end = _maybe_localize_point(
end, end_tz, end, freq, tz, ambiguous, nonexistent
)
if freq is not None:
# We break Day arithmetic (fixed 24 hour) here and opt for
# Day to mean calendar day (23/24/25 hour). Therefore, strip
            # tz info from start and end to avoid DST arithmetic
if isinstance(freq, Day):
if start is not None:
start = start.tz_localize(None)
if end is not None:
end = end.tz_localize(None)
if isinstance(freq, Tick):
values = generate_regular_range(start, end, periods, freq)
else:
xdr = generate_range(start=start, end=end, periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
_tz = start.tz if start is not None else end.tz
values = values.view("M8[ns]")
index = cls._simple_new(values, freq=freq, dtype=tz_to_dtype(_tz))
if tz is not None and index.tz is None:
arr = tzconversion.tz_localize_to_utc(
index.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
index = cls(arr)
# index is localized datetime64 array -> have to convert
# start/end as well to compare
if start is not None:
start = start.tz_localize(tz, ambiguous, nonexistent).asm8
if end is not None:
end = end.tz_localize(tz, ambiguous, nonexistent).asm8
else:
# Create a linearly spaced date_range in local time
# Nanosecond-granularity timestamps aren't always correctly
# representable with doubles, so we limit the range that we
# pass to np.linspace as much as possible
arr = (
np.linspace(0, end.value - start.value, periods, dtype="int64")
+ start.value
)
dtype = tz_to_dtype(tz)
arr = arr.astype("M8[ns]", copy=False)
index = cls._simple_new(arr, freq=None, dtype=dtype)
if start == end:
if not left_inclusive and not right_inclusive:
index = index[1:-1]
else:
if not left_inclusive or not right_inclusive:
if not left_inclusive and len(index) and index[0] == start:
index = index[1:]
if not right_inclusive and len(index) and index[-1] == end:
index = index[:-1]
dtype = tz_to_dtype(tz)
return cls._simple_new(index._ndarray, freq=freq, dtype=dtype)
# -----------------------------------------------------------------
# DatetimeLike Interface
def _unbox_scalar(self, value, setitem: bool = False) -> np.datetime64:
if not isinstance(value, self._scalar_type) and value is not NaT:
raise ValueError("'value' should be a Timestamp.")
self._check_compatible_with(value, setitem=setitem)
return value.asm8
def _scalar_from_string(self, value) -> Timestamp | NaTType:
return Timestamp(value, tz=self.tz)
def _check_compatible_with(self, other, setitem: bool = False):
if other is NaT:
return
self._assert_tzawareness_compat(other)
if setitem:
# Stricter check for setitem vs comparison methods
if not timezones.tz_compare(self.tz, other.tz):
raise ValueError(f"Timezones don't match. '{self.tz}' != '{other.tz}'")
# -----------------------------------------------------------------
# Descriptive Properties
def _box_func(self, x) -> Timestamp | NaTType:
if isinstance(x, np.datetime64):
# GH#42228
# Argument 1 to "signedinteger" has incompatible type "datetime64";
# expected "Union[SupportsInt, Union[str, bytes], SupportsIndex]"
x = np.int64(x) # type: ignore[arg-type]
ts = Timestamp(x, tz=self.tz)
# Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if ts is not NaT: # type: ignore[comparison-overlap]
# GH#41586
# do this instead of passing to the constructor to avoid FutureWarning
ts._set_freq(self.freq)
return ts
@property
# error: Return type "Union[dtype, DatetimeTZDtype]" of "dtype"
# incompatible with return type "ExtensionDtype" in supertype
# "ExtensionArray"
def dtype(self) -> np.dtype | DatetimeTZDtype: # type: ignore[override]
"""
The dtype for the DatetimeArray.
.. warning::
A future version of pandas will change dtype to never be a
``numpy.dtype``. Instead, :attr:`DatetimeArray.dtype` will
always be an instance of an ``ExtensionDtype`` subclass.
Returns
-------
numpy.dtype or DatetimeTZDtype
If the values are tz-naive, then ``np.dtype('datetime64[ns]')``
is returned.
If the values are tz-aware, then the ``DatetimeTZDtype``
is returned.
"""
return self._dtype
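    # Illustrative values of the dtype property (a sketch, not library source):
    #
    #     pd.array(pd.to_datetime(["2020-01-01"])).dtype
    #         -> dtype('<M8[ns]')                      (tz-naive)
    #     pd.array(pd.to_datetime(["2020-01-01"]).tz_localize("UTC")).dtype
    #         -> datetime64[ns, UTC]                   (DatetimeTZDtype, tz-aware)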
@property
def tz(self) -> tzinfo | None:
"""
Return timezone, if any.
Returns
-------
datetime.tzinfo, pytz.tzinfo.BaseTZInfo, dateutil.tz.tz.tzfile, or None
Returns None when the array is tz-naive.
"""
# GH 18595
return getattr(self.dtype, "tz", None)
@tz.setter
def tz(self, value):
# GH 3746: Prevent localizing or converting the index by setting tz
raise AttributeError(
"Cannot directly set timezone. Use tz_localize() "
"or tz_convert() as appropriate"
)
@property
def tzinfo(self) -> tzinfo | None:
"""
Alias for tz attribute
"""
return self.tz
@property # NB: override with cache_readonly in immutable subclasses
def is_normalized(self) -> bool:
"""
Returns True if all of the dates are at midnight ("no time")
"""
return is_date_array_normalized(self.asi8, self.tz)
@property # NB: override with cache_readonly in immutable subclasses
def _resolution_obj(self) -> Resolution:
return get_resolution(self.asi8, self.tz)
# ----------------------------------------------------------------
# Array-Like / EA-Interface Methods
def __array__(self, dtype=None) -> np.ndarray:
if dtype is None and self.tz:
# The default for tz-aware is object, to preserve tz info
dtype = object
return super().__array__(dtype=dtype)
def __iter__(self):
"""
Return an iterator over the boxed values
Yields
------
tstamp : Timestamp
"""
if self.ndim > 1:
for i in range(len(self)):
yield self[i]
else:
# convert in chunks of 10k for efficiency
data = self.asi8
length = len(self)
chunksize = 10000
chunks = (length // chunksize) + 1
with warnings.catch_warnings():
# filter out warnings about Timestamp.freq
warnings.filterwarnings("ignore", category=FutureWarning)
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, length)
converted = ints_to_pydatetime(
data[start_i:end_i], tz=self.tz, freq=self.freq, box="timestamp"
)
yield from converted
def astype(self, dtype, copy: bool = True):
# We handle
# --> datetime
# --> period
# DatetimeLikeArrayMixin Super handles the rest.
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
elif is_datetime64_ns_dtype(dtype):
return astype_dt64_to_dt64tz(self, dtype, copy, via_utc=False)
elif self.tz is None and is_datetime64_dtype(dtype) and dtype != self.dtype:
# unit conversion e.g. datetime64[s]
return self._ndarray.astype(dtype)
elif is_period_dtype(dtype):
return self.to_period(freq=dtype.freq)
return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy)
# -----------------------------------------------------------------
# Rendering Methods
@dtl.ravel_compat
def _format_native_types(
self, na_rep="NaT", date_format=None, **kwargs
) -> npt.NDArray[np.object_]:
from pandas.io.formats.format import get_format_datetime64_from_values
fmt = get_format_datetime64_from_values(self, date_format)
return tslib.format_array_from_datetime(
self.asi8, tz=self.tz, format=fmt, na_rep=na_rep
)
# -----------------------------------------------------------------
# Comparison Methods
def _has_same_tz(self, other) -> bool:
# vzone shouldn't be None if value is non-datetime like
if isinstance(other, np.datetime64):
# convert to Timestamp as np.datetime64 doesn't have tz attr
other = Timestamp(other)
if not hasattr(other, "tzinfo"):
return False
other_tz = other.tzinfo
return timezones.tz_compare(self.tzinfo, other_tz)
def _assert_tzawareness_compat(self, other) -> None:
# adapted from _Timestamp._assert_tzawareness_compat
other_tz = getattr(other, "tzinfo", None)
other_dtype = getattr(other, "dtype", None)
if is_datetime64tz_dtype(other_dtype):
# Get tzinfo from Series dtype
other_tz = other.dtype.tz
if other is NaT:
# pd.NaT quacks both aware and naive
pass
elif self.tz is None:
if other_tz is not None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects."
)
elif other_tz is None:
raise TypeError(
"Cannot compare tz-naive and tz-aware datetime-like objects"
)
# -----------------------------------------------------------------
# Arithmetic Methods
def _sub_datetime_arraylike(self, other):
"""subtract DatetimeArray/Index or ndarray[datetime64]"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
assert is_datetime64_dtype(other)
other = type(self)(other)
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
f"{type(self).__name__} subtraction must have the same "
"timezones or no timezones"
)
self_i8 = self.asi8
other_i8 = other.asi8
arr_mask = self._isnan | other._isnan
new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask)
if self._hasnans or other._hasnans:
np.putmask(new_values, arr_mask, iNaT)
return new_values.view("timedelta64[ns]")
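# --- Editor's note: illustration only, not part of the pandas source -------
# This is the path taken when two datetime-like arrays are subtracted; the
# result is timedelta64[ns]:
#   >>> import pandas as pd
#   >>> pd.date_range("2020-01-02", periods=2) - pd.date_range("2020-01-01", periods=2)
#   TimedeltaIndex(['1 days', '1 days'], dtype='timedelta64[ns]', freq=None)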
def _add_offset(self, offset) -> DatetimeArray:
if self.ndim == 2:
return self.ravel()._add_offset(offset).reshape(self.shape)
assert not isinstance(offset, Tick)
try:
if self.tz is not None:
values = self.tz_localize(None)
else:
values = self
result = offset._apply_array(values).view("M8[ns]")
result = DatetimeArray._simple_new(result)
result = result.tz_localize(self.tz)
except NotImplementedError:
warnings.warn(
"Non-vectorized DateOffset being applied to Series or DatetimeIndex.",
PerformanceWarning,
)
result = self.astype("O") + offset
if not len(self):
# GH#30336 _from_sequence won't be able to infer self.tz
return type(self)._from_sequence(result).tz_localize(self.tz)
return type(self)._from_sequence(result)
def _sub_datetimelike_scalar(self, other):
# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
assert isinstance(other, (datetime, np.datetime64))
assert other is not NaT
other = Timestamp(other)
# error: Non-overlapping identity check (left operand type: "Timestamp",
# right operand type: "NaTType")
if other is NaT: # type: ignore[comparison-overlap]
return self - NaT
if not self._has_same_tz(other):
# require tz compat
raise TypeError(
"Timestamp subtraction must have the same timezones or no timezones"
)
i8 = self.asi8
result = checked_add_with_arr(i8, -other.value, arr_mask=self._isnan)
result = self._maybe_mask_results(result)
return result.view("timedelta64[ns]")
# -----------------------------------------------------------------
# Timezone Conversion and Localization Methods
def _local_timestamps(self) -> np.ndarray:
"""
Convert to an i8 (unix-like nanosecond timestamp) representation
while keeping the local timezone and not using UTC.
This is used to calculate time-of-day information as if the timestamps
were timezone-naive.
"""
if self.tz is None or timezones.is_utc(self.tz):
return self.asi8
return tzconversion.tz_convert_from_utc(self.asi8, self.tz)
def tz_convert(self, tz) -> DatetimeArray:
"""
Convert tz-aware Datetime Array/Index from one time zone to another.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index. A `tz` of None will
convert to UTC and remove the timezone information.
Returns
-------
Array or Index
Raises
------
TypeError
If Datetime Array/Index is tz-naive.
See Also
--------
DatetimeIndex.tz : A timezone that has a variable offset from UTC.
DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a
given time zone, or remove timezone from a tz-aware DatetimeIndex.
Examples
--------
With the `tz` parameter, we can change the DatetimeIndex
to other time zones:
>>> dti = pd.date_range(start='2014-08-01 09:00',
... freq='H', periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert('US/Central')
DatetimeIndex(['2014-08-01 02:00:00-05:00',
'2014-08-01 03:00:00-05:00',
'2014-08-01 04:00:00-05:00'],
dtype='datetime64[ns, US/Central]', freq='H')
With the ``tz=None``, we can remove the timezone (after converting
to UTC if necessary):
>>> dti = pd.date_range(start='2014-08-01 09:00', freq='H',
... periods=3, tz='Europe/Berlin')
>>> dti
DatetimeIndex(['2014-08-01 09:00:00+02:00',
'2014-08-01 10:00:00+02:00',
'2014-08-01 11:00:00+02:00'],
dtype='datetime64[ns, Europe/Berlin]', freq='H')
>>> dti.tz_convert(None)
DatetimeIndex(['2014-08-01 07:00:00',
'2014-08-01 08:00:00',
'2014-08-01 09:00:00'],
dtype='datetime64[ns]', freq='H')
"""
tz = timezones.maybe_get_tz(tz)
if self.tz is None:
# tz naive, use tz_localize
raise TypeError(
"Cannot convert tz-naive timestamps, use tz_localize to localize"
)
# No conversion since timestamps are all UTC to begin with
dtype = tz_to_dtype(tz)
return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq)
@dtl.ravel_compat
def tz_localize(self, tz, ambiguous="raise", nonexistent="raise") -> DatetimeArray:
"""
Localize tz-naive Datetime Array/Index to tz-aware
Datetime Array/Index.
This method takes a time zone (tz) naive Datetime Array/Index object
and makes this time zone aware. It does not move the time to another
time zone.
This method can also be used to do the inverse -- to create a time
zone unaware object from an aware object. To that end, pass `tz=None`.
Parameters
----------
tz : str, pytz.timezone, dateutil.tz.tzfile or None
Time zone to convert timestamps to. Passing ``None`` will
remove the time zone information preserving local time.
ambiguous : 'infer', 'NaT', bool array, default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times.
nonexistent : 'shift_forward', 'shift_backward', 'NaT', timedelta, \
default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST.
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times.
Returns
-------
Same type as self
Array/Index converted to the specified time zone.
Raises
------
TypeError
If the Datetime Array/Index is tz-aware and tz is not None.
See Also
--------
DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from
one time zone to another.
Examples
--------
>>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3)
>>> tz_naive
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq='D')
Localize DatetimeIndex in US/Eastern time zone:
>>> tz_aware = tz_naive.tz_localize(tz='US/Eastern')
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
With the ``tz=None``, we can remove the time zone information
while keeping the local time (not converted to UTC):
>>> tz_aware.tz_localize(None)
DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00',
'2018-03-03 09:00:00'],
dtype='datetime64[ns]', freq=None)
Be careful with DST changes. When there is sequential data, pandas can
infer the DST time:
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.dt.tz_localize('CET', ambiguous='infer')
0 2018-10-28 01:30:00+02:00
1 2018-10-28 02:00:00+02:00
2 2018-10-28 02:30:00+02:00
3 2018-10-28 02:00:00+01:00
4 2018-10-28 02:30:00+01:00
5 2018-10-28 03:00:00+01:00
6 2018-10-28 03:30:00+01:00
dtype: datetime64[ns, CET]
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False]))
0 2018-10-28 01:20:00+02:00
1 2018-10-28 02:36:00+02:00
2 2018-10-28 03:46:00+01:00
dtype: datetime64[ns, CET]
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
0 2015-03-29 03:00:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
0 2015-03-29 01:59:59.999999999+01:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
>>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
0 2015-03-29 03:30:00+02:00
1 2015-03-29 03:30:00+02:00
dtype: datetime64[ns, Europe/Warsaw]
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise', "
"'NaT', 'shift_forward', 'shift_backward' or "
"a timedelta object"
)
if self.tz is not None:
if tz is None:
new_dates = tzconversion.tz_convert_from_utc(self.asi8, self.tz)
else:
raise TypeError("Already tz-aware, use tz_convert to convert.")
else:
tz = timezones.maybe_get_tz(tz)
# Convert to UTC
new_dates = tzconversion.tz_localize_to_utc(
self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent
)
new_dates = new_dates.view(DT64NS_DTYPE)
dtype = tz_to_dtype(tz)
freq = None
if timezones.is_utc(tz) or (len(self) == 1 and not isna(new_dates[0])):
# we can preserve freq
# TODO: Also for fixed-offsets
freq = self.freq
elif tz is None and self.tz is None:
# no-op
freq = self.freq
return self._simple_new(new_dates, dtype=dtype, freq=freq)
# ----------------------------------------------------------------
# Conversion Methods - Vectorized analogues of Timestamp methods
def to_pydatetime(self) -> npt.NDArray[np.object_]:
"""
Return Datetime Array/Index as object ndarray of datetime.datetime
objects.
Returns
-------
datetimes : ndarray[object]
"""
return ints_to_pydatetime(self.asi8, tz=self.tz)
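# --- Editor's note: illustration only, not part of the pandas source -------
# The stdlib round-trip keeps the timezone on each datetime.datetime:
#   >>> import pandas as pd
#   >>> pd.date_range("2021-01-01", periods=2, tz="UTC").to_pydatetime()
#   array([datetime.datetime(2021, 1, 1, 0, 0, tzinfo=<UTC>),
#          datetime.datetime(2021, 1, 2, 0, 0, tzinfo=<UTC>)], dtype=object)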
def normalize(self) -> DatetimeArray:
"""
Convert times to midnight.
The time component of the date-time is converted to midnight i.e.
00:00:00. This is useful in cases when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
new_values = normalize_i8_timestamps(self.asi8, self.tz)
return type(self)(new_values)._with_freq("infer").tz_localize(self.tz)
@dtl.ravel_compat
def to_period(self, freq=None) -> PeriodArray:
"""
Cast to PeriodArray/Index at a particular frequency.
Converts DatetimeArray/Index to PeriodArray/Index.
Parameters
----------
freq : str or Offset, optional
One of pandas' :ref:`offset strings <timeseries.offset_aliases>`
or an Offset object. Will be inferred by default.
Returns
-------
PeriodArray/Index
Raises
------
ValueError
When converting a DatetimeArray/Index with non-regular values,
so that a frequency cannot be inferred.
See Also
--------
PeriodIndex: Immutable ndarray holding ordinal values.
DatetimeIndex.to_pydatetime: Return DatetimeIndex as object.
Examples
--------
>>> df = pd.DataFrame({"y": [1, 2, 3]},
... index=pd.to_datetime(["2000-03-31 00:00:00",
... "2000-05-31 00:00:00",
... "2000-08-31 00:00:00"]))
>>> df.index.to_period("M")
PeriodIndex(['2000-03', '2000-05', '2000-08'],
dtype='period[M]')
Infer the daily frequency
>>> idx = pd.date_range("2017-01-01", periods=2)
>>> idx.to_period()
PeriodIndex(['2017-01-01', '2017-01-02'],
dtype='period[D]')
"""
from pandas.core.arrays import PeriodArray
if self.tz is not None:
warnings.warn(
"Converting to PeriodArray/Index representation "
"will drop timezone information.",
UserWarning,
)
if freq is None:
freq = self.freqstr or self.inferred_freq
if freq is None:
raise ValueError(
"You must pass a freq argument as current index has none."
)
res = get_period_alias(freq)
# https://github.com/pandas-dev/pandas/issues/33358
if res is None:
res = freq
freq = res
return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz)
def to_perioddelta(self, freq) -> TimedeltaArray:
"""
Calculate TimedeltaArray of difference between index
values and index converted to PeriodArray at specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
# Deprecation GH#34853
warnings.warn(
"to_perioddelta is deprecated and will be removed in a "
"future version. "
"Use `dtindex - dtindex.to_period(freq).to_timestamp()` instead.",
FutureWarning,
# stacklevel chosen to be correct for when called from DatetimeIndex
stacklevel=3,
)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view("m8[ns]")
return TimedeltaArray(m8delta)
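# --- Editor's note: illustration only, not part of the pandas source -------
# The replacement recommended by the deprecation warning above computes the
# same quantity explicitly:
#   >>> import pandas as pd
#   >>> dtindex = pd.date_range("2020-01-15 06:00", periods=2, freq="D")
#   >>> dtindex - dtindex.to_period("D").to_timestamp()
#   # -> a TimedeltaIndex in which both entries are Timedelta('0 days 06:00:00')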
# -----------------------------------------------------------------
# Properties - Vectorized Timestamp Properties/Methods
def month_name(self, locale=None):
"""
Return the month names of the DatetimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "month_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
def day_name(self, locale=None):
"""
Return the day names of the DatetimeIndex with specified locale.
Parameters
----------
locale : str, optional
Locale determining the language in which to return the day name.
Default is English locale.
Returns
-------
Index
Index of day names.
Examples
--------
>>> idx = pd.date_range(start='2018-01-01', freq='D', periods=3)
>>> idx
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', freq='D')
>>> idx.day_name()
Index(['Monday', 'Tuesday', 'Wednesday'], dtype='object')
"""
values = self._local_timestamps()
result = fields.get_date_name_field(values, "day_name", locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
@property
def time(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time. The time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
timestamps = self._local_timestamps()
return ints_to_pydatetime(timestamps, box="time")
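# --- Editor's note: illustration only, not part of the pandas source -------
# Because of _local_timestamps() the wall-clock time is reported, not UTC:
#   >>> import pandas as pd
#   >>> pd.date_range("2020-01-01 09:30", periods=1, tz="US/Eastern").time
#   array([datetime.time(9, 30)], dtype=object)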
@property
def timetz(self) -> npt.NDArray[np.object_]:
"""
Returns numpy array of datetime.time also containing timezone
information. The time part of the Timestamps.
"""
return | ints_to_pydatetime(self.asi8, self.tz, box="time") | pandas._libs.tslibs.ints_to_pydatetime |
from abc import ABC
from abc import abstractmethod
from typing import List
from typing import Optional
import pandas as pd
from etna.transforms.base import Transform
class WindowStatisticsTransform(Transform, ABC):
"""WindowStatisticsTransform handles computation of statistical features on windows."""
def __init__(
self,
in_column: str,
out_column: str,
window: int,
seasonality: int = 1,
min_periods: int = 1,
fillna: float = 0,
**kwargs,
):
"""Init WindowStatisticsTransform.
Parameters
----------
in_column: str
name of processed column
window: int
size of the window to aggregate; if -1 is set, all available history is used
out_column: str
result column name
seasonality: int
seasonality of lags to compute window's aggregation with
min_periods: int
minimum number of targets in the window required to compute the aggregation;
if there are fewer than min_periods targets, None is returned
fillna: float
value to fill the resulting NaNs with
"""
self.in_column = in_column
self.out_column_name = out_column
self.window = window
self.seasonality = seasonality
self.min_periods = min_periods
self.fillna = fillna
self.kwargs = kwargs
self.min_required_len = max(self.min_periods - 1, 0) * self.seasonality + 1
self.history = self.window * self.seasonality
def fit(self, *args) -> "WindowStatisticsTransform":
"""Fits transform."""
return self
def _get_required_lags(self, series: pd.Series) -> pd.Series:
"""Get lags according to given seasonality."""
return pd.Series(series.values[::-1][:: self.seasonality])
@abstractmethod
def _aggregate_window(self, series: pd.Series) -> float:
"""Aggregate targets from given series."""
pass
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""Compute feature's value.
Parameters
----------
df: pd.DataFrame
dataframe to generate features for
Returns
-------
result: pd.DataFrame
dataframe with results
"""
features = (
df.xs(self.in_column, level=1, axis=1)
.shift(1)
.rolling(
window=self.seasonality * self.window if self.window != -1 else len(df) - 1,
min_periods=self.min_required_len,
)
.aggregate(self._aggregate_window)
)
features.fillna(value=self.fillna, inplace=True)
dataframes = []
for seg in df.columns.get_level_values(0).unique():
feature = features[seg].rename(self.out_column_name)
tmp = df[seg].join(feature)
_idx = tmp.columns.to_frame()
_idx.insert(0, "segment", seg)
tmp.columns = | pd.MultiIndex.from_frame(_idx) | pandas.MultiIndex.from_frame |
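# --- Editor's sketch: not part of the original source -----------------------
# A minimal concrete subclass showing how WindowStatisticsTransform is meant
# to be extended: only the abstract _aggregate_window hook needs to be
# implemented. The class name MeanWindowTransform and the column names are
# hypothetical and chosen purely for illustration.
class MeanWindowTransform(WindowStatisticsTransform):
    """Compute the mean of the previous `window` target lags."""

    def _aggregate_window(self, series: pd.Series) -> float:
        # `series` holds the rolling window selected by the parent transform
        return series.mean()


# Typical wiring: one statistic per generated feature column.
mean_week = MeanWindowTransform(in_column="target", out_column="target_mean_7", window=7)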
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import inspect
import warnings
from ._utils import get_string
is_pandas_installed = True
try:
import pandas as pd
except ImportError:
is_pandas_installed = False
class Iterator(object):
def __init__(self, module, function):
assert inspect.ismodule(module), "{module} must be of type module".format(
module=module
)
self.module = module
self.function = function
def __iter__(self):
self.module.First()
while True:
yield getattr(self.module, self.function)
if not self.module.Next() > 0:
break
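# --- Editor's usage sketch: illustration only, not part of the original -----
# Iterator yields the bound accessor named by `function` for each element of
# an OpenDSS interface module, so a typical loop calls the yielded function:
#   import opendssdirect as dss
#   for load_name in Iterator(dss.Loads, "Name"):
#       print(load_name())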
def run_command(text, dss=None):
"""Use Text interface of OpenDSS"""
if dss is None:
import opendssdirect as dss
r = []
for l in text.splitlines():
dss.dss_lib.Text_Set_Command(l.encode("ascii"))
r.append(get_string(dss.dss_lib.Text_Get_Result()))
return "\n".join(r).strip()
def to_dataframe(module):
data = dict()
for e in Iterator(module, "Name"):
data[e()] = dict()
if len(data) != 0:
for i in Iterator(module, "Name"):
element_name = i()
data[element_name] = {
n: getattr(module, n)() for n, f in inspect.getmembers(module)
}
else:
class_name = module.__name__
warnings.warn("Empty element type ({class_name})".format(class_name=class_name))
if is_pandas_installed:
return pd.DataFrame(data).T
else:
warnings.warn(
"Pandas cannot be installed. Please see documentation for how to install extra dependencies."
)
return data
def _clean_data(data, class_name):
import opendssdirect as dss
for element in dss.ActiveClass.AllNames():
name = "{class_name}.{element}".format(class_name=class_name, element=element)
dss.ActiveClass.Name(element)
if "nconds" in dss.Element.AllPropertyNames():
nconds = int(data[name]["nconds"])
x = []
h = []
units = []
for cond in range(1, nconds + 1):
dss.run_command("{name}.cond={cond}".format(name=name, cond=cond))
x.append(float(dss.run_command("? {name}.x".format(name=name))))
h.append(float(dss.run_command("? {name}.h".format(name=name))))
units.append(dss.run_command("? {name}.units".format(name=name)))
data[name]["x"] = x
data[name]["h"] = h
data[name]["units"] = units
return data
def class_to_dataframe(class_name, dss=None, transform_string=None, clean_data=None):
if transform_string is None:
transform_string = _evaluate_expression
if clean_data is None:
clean_data = _clean_data
if not callable(transform_string):
raise TypeError(
"The `transform_string` must be a callable. Please check the documentation or contact the developer."
)
if dss is None:
import opendssdirect as dss
dss.Circuit.SetActiveClass("{class_name}".format(class_name=class_name))
if class_name.lower() != dss.ActiveClass.ActiveClassName().lower():
raise NotImplementedError(
"`{class_name}` is not supported by the `class_to_dataframe` interface, please contact the developer for more information.".format(
class_name=class_name
)
)
data = dict()
for element in dss.ActiveClass.AllNames():
name = "{class_name}.{element}".format(class_name=class_name, element=element)
dss.ActiveClass.Name(element)
data[name] = dict()
for i, n in enumerate(dss.Element.AllPropertyNames()):
# use 1-based index for compatibility with previous versions
string = dss.Properties.Value(str(i + 1))
data[name][n] = transform_string(string)
data = clean_data(data, class_name)
if is_pandas_installed:
return | pd.DataFrame(data) | pandas.DataFrame |
from __future__ import absolute_import, division, unicode_literals
import unittest
import jsonpickle
from helper import SkippableTest
try:
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
from pandas.testing import assert_index_equal
except ImportError:
np = None
class PandasTestCase(SkippableTest):
def setUp(self):
if np is None:
self.should_skip = True
return
self.should_skip = False
import jsonpickle.ext.pandas
jsonpickle.ext.pandas.register_handlers()
def tearDown(self):
if self.should_skip:
return
import jsonpickle.ext.pandas
jsonpickle.ext.pandas.unregister_handlers()
def roundtrip(self, obj):
return jsonpickle.decode(jsonpickle.encode(obj))
def test_series_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
ser = pd.Series(
{
'an_int': np.int_(1),
'a_float': np.float_(2.5),
'a_nan': np.nan,
'a_minus_inf': -np.inf,
'an_inf': np.inf,
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.datetime64('2014-01-01'),
'complex': np.complex_(1 - 2j),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_({'a': 'b'}),
}
)
decoded_ser = self.roundtrip(ser)
assert_series_equal(decoded_ser, ser)
def test_dataframe_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
'date': np.array([np.datetime64('2014-01-01')] * 3),
'complex': np.complex_([1 - 2j, 2 - 1.2j, 3 - 1.3j]),
# TODO: the following dtypes are not currently supported.
# 'object': np.object_([{'a': 'b'}]*3),
}
)
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_multindex_dataframe_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{
'idx_lvl0': ['a', 'b', 'c'],
'idx_lvl1': np.int_([1, 1, 2]),
'an_int': np.int_([1, 2, 3]),
'a_float': np.float_([2.5, 3.5, 4.5]),
'a_nan': np.array([np.nan] * 3),
'a_minus_inf': np.array([-np.inf] * 3),
'an_inf': np.array([np.inf] * 3),
'a_str': np.str_('foo'),
'a_unicode': np.unicode_('bar'),
}
)
df = df.set_index(['idx_lvl0', 'idx_lvl1'])
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_dataframe_with_interval_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
df = pd.DataFrame(
{'a': [1, 2], 'b': [3, 4]}, index=pd.IntervalIndex.from_breaks([1, 2, 4])
)
decoded_df = self.roundtrip(df)
assert_frame_equal(decoded_df, df)
def test_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.Index(range(5, 10))
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_datetime_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.date_range(start='2019-01-01', end='2019-02-01', freq='D')
decoded_idx = self.roundtrip(idx)
assert_index_equal(decoded_idx, idx)
def test_ragged_datetime_index_roundtrip(self):
if self.should_skip:
return self.skip('pandas is not importable')
idx = pd.DatetimeIndex(['2019-01-01', '2019-01-02', '2019-01-05'])
decoded_idx = self.roundtrip(idx)
| assert_index_equal(decoded_idx, idx) | pandas.testing.assert_index_equal |
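# --- Editor's usage sketch: illustration only, not part of the original -----
# The pattern these tests exercise, outside the unittest harness:
#   import jsonpickle, jsonpickle.ext.pandas
#   import pandas as pd
#   jsonpickle.ext.pandas.register_handlers()
#   df = pd.DataFrame({"a": [1, 2]})
#   restored = jsonpickle.decode(jsonpickle.encode(df))
#   assert restored.equals(df)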
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': | Categorical(['a', 'a', 'b']) | pandas.Categorical |
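# --- Editor's usage sketch: illustration only, not part of the original -----
# The behaviour under test (GH 10153): read_csv can parse a column directly
# into a Categorical via the dtype argument:
#   >>> pd.read_csv(StringIO("a,b\n1,x\n2,y"), dtype={"b": "category"}).dtypes
#   a       int64
#   b    category
#   dtype: object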
import pandas as pd
import numpy as np
import pytest
import re
import tubular
import tubular.testing.helpers as h
import tubular.testing.test_data as data_generators_p
import input_checker
from input_checker._version import __version__
from input_checker.checker import InputChecker
from input_checker.exceptions import InputCheckerError
class TestInit(object):
"""Tests for InputChecker.init()."""
def test_super_init_called(self, mocker):
"""Test that init calls BaseTransformer.init."""
expected_call_args = {0: {"args": (), "kwargs": {"columns": ["a", "b"]}}}
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
InputChecker(columns=["a", "b"])
def test_inheritance(self):
"""Test that InputChecker inherits from tubular.base.BaseTransformer."""
x = InputChecker()
h.assert_inheritance(x, tubular.base.BaseTransformer)
def test_arguments(self):
"""Test that InputChecker init has expected arguments."""
h.test_function_arguments(
func=InputChecker.__init__,
expected_arguments=[
"self",
"columns",
"categorical_columns",
"numerical_columns",
"datetime_columns",
"skip_infer_columns",
],
expected_default_values=(None, None, None, None, None),
)
def test_version_attribute(self):
"""Test that __version__ attribute takes expected value."""
x = InputChecker(columns=["a"])
h.assert_equal_dispatch(
expected=__version__,
actual=x.version_,
msg="__version__ attribute",
)
def test_columns_attributes_generated(self):
"""Test all columns attributes are saved with InputChecker init"""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b"],
datetime_columns=["d"],
skip_infer_columns=["c"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert hasattr(x, "columns") is True, "columns attribute not present after init"
assert (
hasattr(x, "numerical_columns") is True
), "numerical_columns attribute not present after init"
assert (
hasattr(x, "categorical_columns") is True
), "categorical_columns attribute not present after init"
assert (
hasattr(x, "datetime_columns") is True
), "datetime_columns attribute not present after init"
assert (
hasattr(x, "skip_infer_columns") is True
), "skip_infer_columns attribute not present after init"
def test_check_type_called(self, mocker):
"""Test all check type is called by the init method."""
spy = mocker.spy(input_checker.checker.InputChecker, "_check_type")
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b"],
datetime_columns=["d"],
skip_infer_columns=["c"],
)
assert (
spy.call_count == 5
), "unexpected number of calls to InputChecker._check_type with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
call_4_args = spy.call_args_list[4]
call_4_pos_args = call_4_args[0]
expected_pos_args_0 = (
x,
["a", "b", "c", "d"],
"input columns",
[list, type(None), str],
)
expected_pos_args_1 = (
x,
["b"],
"categorical columns",
[list, str, type(None)],
)
expected_pos_args_2 = (
x,
["a"],
"numerical columns",
[list, dict, str, type(None)],
)
expected_pos_args_3 = (
x,
["d"],
"datetime columns",
[list, dict, str, type(None)],
)
expected_pos_args_4 = (
x,
["c"],
"skip infer columns",
[list, type(None)],
)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _check_type call for columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _check_type call for categorical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _check_type call for numerical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _check_type call for datetime columns argument"
assert (
expected_pos_args_4 == call_4_pos_args
), "positional args unexpected in _check_type call for skip infer columns argument"
def test_check_is_string_value_called(self, mocker):
"""Test all check string is called by the init method when option set to infer."""
spy = mocker.spy(input_checker.checker.InputChecker, "_is_string_value")
x = InputChecker(
numerical_columns="infer",
categorical_columns="infer",
datetime_columns="infer",
)
assert (
spy.call_count == 3
), "unexpected number of calls to InputChecker._is_string_value with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
expected_pos_args_0 = (x, x.categorical_columns, "categorical columns", "infer")
expected_pos_args_1 = (x, x.numerical_columns, "numerical columns", "infer")
expected_pos_args_2 = (x, x.datetime_columns, "datetime columns", "infer")
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_string_value call for numerical columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_string_value call for categorical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_string_value call for categorical columns argument"
def test_check_is_empty_called(self, mocker):
"""Test all check is empty is called by the init method."""
spy = mocker.spy(input_checker.checker.InputChecker, "_is_empty")
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
assert (
spy.call_count == 4
), "unexpected number of calls to InputChecker._is_empty with init"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
expected_pos_args_0 = (x, "input columns", ["a", "b", "c", "d"])
expected_pos_args_1 = (x, "categorical columns", ["b", "c"])
expected_pos_args_2 = (x, "numerical columns", ["a"])
expected_pos_args_3 = (x, "datetime columns", ["d"])
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_empty call for categorical columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _is_empty call for numerical columns argument"
def test_check_is_listed_in_columns_called(self, mocker):
spy = mocker.spy(input_checker.checker.InputChecker, "_is_listed_in_columns")
InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._is_listed_in_columns with init"
class TestConsolidateInputs(object):
def test_arguments(self):
"""Test that _consolidate_inputs has expected arguments."""
h.test_function_arguments(
func=InputChecker._consolidate_inputs,
expected_arguments=["self", "X"],
expected_default_values=None,
)
def test_infer_datetime_columns(self):
"""Test that _consolidate_inputs infers the correct datetime columns"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert x.datetime_columns == [
"d",
"e",
], "infer datetime not finding correct columns"
def test_infer_datetime_dict(self):
"""Test that _consolidate_inputs infers the correct datetime dict"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
x.datetime_dict["d"]["maximum"] is False
), "infer numerical not specifying maximum value check as true"
assert (
x.datetime_dict["d"]["minimum"] is True
), "infer numerical not specifying maximum value check as true"
def test_infer_categorical_columns(self):
"""Test that _consolidate_inputs infers the correct categorical columns"""
x = InputChecker(categorical_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = [True, True, False, True, True, False, np.nan]
df["d"] = df["d"].astype("bool")
x.fit(df)
assert x.categorical_columns == [
"b",
"c",
"d",
], "infer categorical not finding correct columns"
def test_infer_numerical_columns(self):
"""Test that _consolidate_inputs infers the correct numerical columns"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert x.numerical_columns == [
"a"
], "infer numerical not finding correct columns"
def test_infer_numerical_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring numerical"""
x = InputChecker(numerical_columns="infer", skip_infer_columns=["a"])
df = data_generators_p.create_df_2()
df["d"] = df["a"]
x.fit(df)
assert x.numerical_columns == [
"d"
], "infer numerical not finding correct columns when skipping infer columns"
def test_infer_categorical_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring categorical"""
x = InputChecker(categorical_columns="infer", skip_infer_columns=["b"])
df = data_generators_p.create_df_2()
x.fit(df)
assert x.categorical_columns == [
"c"
], "infer categorical not finding correct columns when skipping infer columns"
def test_infer_datetime_skips_infer_columns(self):
"""Test that _consolidate_inputs skips right columns when inferring datetime"""
x = InputChecker(datetime_columns="infer", skip_infer_columns=["d"])
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["a"] = df["d"]
x.fit(df)
assert x.datetime_columns == [
"a"
], "infer datetime not finding correct columns when skipping infer columns"
def test_infer_numerical_dict(self):
"""Test that _consolidate_inputs infers the correct numerical dict"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
x.numerical_dict["a"]["maximum"] is True
), "infer numerical not specifying maximum value check as true"
assert (
x.numerical_dict["a"]["minimum"] is True
), "infer numerical not specifying minimum value check as true"
def test_datetime_type(self):
"""Test that datetime columns is a list after calling _consolidate_inputs"""
x = InputChecker(datetime_columns="infer")
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
type(x.datetime_columns) is list
), f"incorrect datetime_columns type returned from _consolidate_inputs - expected: list but got: {type(x.datetime_columns)} "
def test_categorical_type(self):
"""Test that categorical columns is a list after calling _consolidate_inputs"""
x = InputChecker(categorical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
type(x.categorical_columns) is list
), f"incorrect categorical_columns type returned from _consolidate_inputs - expected: list but got: {type(x.categorical_columns)} "
def test_numerical_type(self):
"""Test that numerical columns and dict are a list and dict after calling _consolidate_inputs"""
x = InputChecker(numerical_columns="infer")
df = data_generators_p.create_df_2()
x.fit(df)
assert (
type(x.numerical_columns) is list
), f"incorrect numerical_columns type returned from _consolidate_inputs - expected: list but got: {type(x.numerical_columns)} "
assert (
type(x.numerical_dict) is dict
), f"incorrect numerical_dict type returned from _consolidate_inputs - expected: dict but got: {type(x.numerical_dict)} "
def test_check_is_subset_called(self, mocker):
"""Test all check _is_subset is called by the _consolidate_inputs method."""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["c"],
datetime_columns=["d"],
skip_infer_columns=["b"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
spy = mocker.spy(input_checker.checker.InputChecker, "_is_subset")
x.fit(df)
assert (
spy.call_count == 5
), "unexpected number of calls to InputChecker._is_subset with _consolidate_inputs"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
call_1_args = spy.call_args_list[1]
call_1_pos_args = call_1_args[0]
call_2_args = spy.call_args_list[2]
call_2_pos_args = call_2_args[0]
call_3_args = spy.call_args_list[3]
call_3_pos_args = call_3_args[0]
call_4_args = spy.call_args_list[4]
call_4_pos_args = call_4_args[0]
expected_pos_args_0 = (x, "skip infer columns", ["b"], df)
expected_pos_args_1 = (x, "input columns", ["a", "b", "c", "d"], df)
expected_pos_args_2 = (x, "categorical columns", ["c"], df)
expected_pos_args_3 = (x, "numerical columns", ["a"], df)
expected_pos_args_4 = (x, "datetime columns", ["d"], df)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _is_subset call for skip_infer_columns columns argument"
assert (
expected_pos_args_1 == call_1_pos_args
), "positional args unexpected in _is_subset call for input columns argument"
assert (
expected_pos_args_2 == call_2_pos_args
), "positional args unexpected in _is_subset call for categorical columns argument"
assert (
expected_pos_args_3 == call_3_pos_args
), "positional args unexpected in _is_subset call for numerical columns argument"
assert (
expected_pos_args_4 == call_4_pos_args
), "positional args unexpected in _is_subset call for datetime columns argument"
class TestFitTypeChecker(object):
"""Tests for InputChecker._fit_type_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_type_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_type_checker, expected_arguments=["self", "X"]
)
def test_no_column_classes_before_fit(self):
"""Test column_classes is not present before fit called"""
x = InputChecker()
assert (
hasattr(x, "column_classes") is False
), "column_classes attribute present before fit"
def test_column_classes_after_fit(self):
"""Test column_classes is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
assert hasattr(
x, "column_classes"
), "column_classes attribute not present after fit"
def test_correct_columns_classes(self):
"""Test fit type checker saves types for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
x.fit(df)
assert list(x.column_classes.keys()) == [
"a"
], f"incorrect values returned from _fit_value_checker - expected: ['a'] but got: {list(x.column_classes.keys())}"
def test_correct_classes_identified(self):
"""Test fit type checker identifies correct classes is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
x.column_classes["a"] == "float64"
), f"incorrect type returned from _fit_type_checker for column 'a' - expected: float64 but got: {x.column_classes['a']}"
assert (
x.column_classes["b"] == "object"
), f"incorrect type returned from _fit_type_checker for column 'b' - expected: object but got: {x.column_classes['b']}"
assert (
x.column_classes["c"] == "category"
), f"incorrect type returned from _fit_type_checker for column 'c' - expected: category but got: {x.column_classes['c']}"
assert (
x.column_classes["d"] == "datetime64[ns]"
), f"incorrect type returned from _fit_type_checker for column 'd' - expected: datetime64[ns] but got: {x.column_classes['d']}"
class TestFitNullChecker(object):
"""Tests for InputChecker._fit_null_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_null_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_null_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test null_map is not present before fit called"""
x = InputChecker()
assert hasattr(x, "null_map") is False, "null_map attribute present before fit"
def test_expected_values_after_fit(self):
"""Test null_map is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
assert hasattr(x, "null_map"), "null_map attribute not present after fit"
def test_correct_columns_nulls(self):
"""Test fit nulls checker saves map for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
x.fit(df)
assert list(x.null_map.keys()) == [
"a"
], f"incorrect values returned from _fit_null_checker - expected: ['a'] but got: {list(x.null_map.keys())}"
def test_correct_classes_identified(self):
"""Test fit null checker identifies correct columns with nulls after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker()
df["b"] = df["b"].fillna("a")
x.fit(df)
assert (
x.null_map["a"] == 1
), f"incorrect values returned from _fit_null_checker - expected: 1 but got: {x.null_map['a']}"
assert (
x.null_map["b"] == 0
), f"incorrect values returned from _fit_null_checker - expected: 0 but got: {x.null_map['b']}"
assert (
x.null_map["c"] == 1
), f"incorrect values returned from _fit_null_checker - expected: 1 but got: {x.null_map['c']}"
class TestFitValueChecker(object):
"""Tests for InputChecker._fit_value_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_value_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test expected_values is not present before fit called"""
x = InputChecker(categorical_columns=["b", "c"])
assert (
hasattr(x, "expected_values") is False
), "expected_values attribute present before fit"
def test_expected_values_after_fit(self):
"""Test expected_values is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
assert hasattr(
x, "expected_values"
), "expected_values attribute not present after fit"
def test_correct_columns_map(self):
"""Test fit value checker saves levels for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
assert list(x.expected_values.keys()) == [
"b",
"c",
], f"incorrect values returned from _fit_value_checker - expected: ['b', 'c'] but got: {list(x.expected_values.keys())}"
def test_correct_values_identified(self):
"""Test fit value checker identifies corrcet levels after fit called"""
df = data_generators_p.create_df_2()
df["d"] = [True, True, False, True, True, False, np.nan]
df["d"] = df["d"].astype("bool")
x = InputChecker(categorical_columns=["b", "c", "d"])
x.fit(df)
assert x.expected_values["b"] == [
"a",
"b",
"c",
"d",
"e",
"f",
np.nan,
], f"incorrect values returned from _fit_value_checker - expected: ['a', 'b', 'c', 'd', 'e', 'f', np.nan] but got: {x.expected_values['b']}"
assert x.expected_values["c"] == [
"a",
"b",
"c",
"d",
"e",
"f",
np.nan,
], f"incorrect values returned from _fit_value_checker - expected: ['a', 'b', 'c', 'd', 'e', 'f', np.nan] but got: {x.expected_values['c']}"
assert x.expected_values["d"] == [
True,
False,
], f"incorrect values returned from _fit_value_checker - expected: [True, False, np.nan] but got: {x.expected_values['d']}"
class TestFitNumericalChecker(object):
"""Tests for InputChecker._fit_numerical_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_numerical_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_numerical_checker, expected_arguments=["self", "X"]
)
def test_no_expected_values_before_fit(self):
"""Test numerical_values is not present before fit called"""
x = InputChecker()
assert (
hasattr(x, "numerical_values") is False
), "numerical_values attribute present before fit"
def test_expected_values_after_fit(self):
"""Test numerical_values is present after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert hasattr(
x, "numerical_values"
), "numerical_values attribute not present after fit"
def test_correct_columns_num_values(self):
"""Test fit numerical checker saves values for correct columns after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert list(x.numerical_values.keys()) == [
"a"
], f"incorrect values returned from numerical_values - expected: ['a'] but got: {list(x.numerical_values.keys())}"
def test_correct_numerical_values_identified(self):
"""Test fit numerical checker identifies correct range values after fit called"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
assert (
x.numerical_values["a"]["maximum"] == 6
), f"incorrect values returned from _fit_numerical_checker - expected: 1 but got: {x.numerical_values['a']['maximum']}"
assert (
x.numerical_values["a"]["minimum"] == 1
), f"incorrect values returned from _fit_numerical_checker - expected: 0 but got: {x.numerical_values['a']['minimum']}"
def test_correct_numerical_values_identified_dict(self):
"""Test fit numerical checker identifies correct range values after fit called when inputting a dictionary"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = True
numerical_dict["a"]["minimum"] = False
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
assert (
x.numerical_values["a"]["maximum"] == 6
), f"incorrect values returned from _fit_numerical_checker - expected: 1 but got: {x.numerical_values['a']['maximum']}"
assert (
x.numerical_values["a"]["minimum"] is None
), f"incorrect values returned from _fit_numerical_checker - expected: None but got: {x.numerical_values['a']['minimum']}"
class TestFitDatetimeChecker(object):
"""Tests for InputChecker._fit_datetime_checker()."""
def test_arguments(self):
"""Test that InputChecker _fit_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._fit_datetime_checker, expected_arguments=["self", "X"]
)
def test_no_datetime_values_before_fit(self):
"""Test expected_values is not present before fit called"""
x = InputChecker(datetime_columns=["b", "c"])
assert (
hasattr(x, "datetime_values") is False
), "datetime_values attribute present before fit"
def test_datetime_values_after_fit(self):
"""Test datetime_values is present after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d", "e"])
x.fit(df)
assert hasattr(
x, "datetime_values"
), "datetime_values attribute not present after fit"
def test_correct_columns_map(self):
"""Test fit datetime checker saves minimum dates for correct columns after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
df["e"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08-04-2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d", "e"])
x.fit(df)
assert list(x.datetime_values.keys()) == [
"d",
"e",
], f"incorrect values returned from _fit_datetime_checker - expected: ['d', 'e'] but got: {list(x.datetime_values.keys())} "
def test_correct_datetime_values_identified(self):
"""Test fit datetime checker identifies correct minimum bound after fit called"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
expected_min_d = pd.to_datetime("15/10/2018").date()
actual_min_d = x.datetime_values["d"]["minimum"]
actual_max_d = x.datetime_values["d"]["maximum"]
assert (
actual_min_d == expected_min_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_min_d}, but got: {actual_min_d}"
assert (
actual_max_d is None
), f"incorrect values returned from _fit_datetime_checker - expected: None, but got: {actual_max_d}"
def test_correct_datetime_values_identified_dict(self):
"""Test fit datetime checker identifies correct range values after fit called when inputting a dictionary"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
datetime_dict = {"d": {"maximum": True, "minimum": True}}
x = InputChecker(datetime_columns=datetime_dict)
x.fit(df)
expected_min_d = pd.to_datetime("15/10/2018").date()
expected_max_d = pd.to_datetime("01/02/2021").date()
actual_min_d = x.datetime_values["d"]["minimum"]
actual_max_d = x.datetime_values["d"]["maximum"]
assert (
actual_min_d == expected_min_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_min_d}, but got: {actual_min_d}"
assert (
actual_max_d == expected_max_d
), f"incorrect values returned from _fit_datetime_checker - expected: {expected_max_d}, but got: {actual_max_d}"
class TestFit(object):
"""Tests for InputChecker.fit()."""
def test_arguments(self):
"""Test that InputChecker fit has expected arguments."""
h.test_function_arguments(
func=InputChecker.fit,
expected_arguments=["self", "X", "y"],
expected_default_values=(None,),
)
def test_super_fit_called(self, mocker):
"""Test that BaseTransformer fit called."""
expected_call_args = {
0: {"args": (data_generators_p.create_df_2(), None), "kwargs": {}}
}
df = data_generators_p.create_df_2()
x = InputChecker(columns=["a"])
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "fit", expected_call_args
):
x.fit(df)
def test_all_columns_selected(self):
"""Test fit selects all columns when columns parameter set to None"""
df = data_generators_p.create_df_2()
x = InputChecker(columns=None)
assert (
x.columns is None
), f"incorrect columns attribute before fit when columns parameter set to None - expected: None but got: {x.columns}"
x.fit(df)
assert x.columns == [
"a",
"b",
"c",
], f"incorrect columns identified when columns parameter set to None - expected: ['a', 'b', 'c'] but got: {x.columns}"
def test_fit_returns_self(self):
"""Test fit returns self?"""
df = data_generators_p.create_df_2()
x = InputChecker()
x_fitted = x.fit(df)
assert x_fitted is x, "Returned value from InputChecker.fit not as expected."
def test_no_optional_calls_fit(self):
"""Test numerical_values and expected_values is not present after fit if parameters set to None"""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
x.fit(df)
assert (
hasattr(x, "numerical_values") is False
), "numerical_values attribute present with numerical_columns set to None"
assert (
hasattr(x, "expected_values") is False
), "expected_values attribute present with categorical_columns set to None"
assert (
hasattr(x, "datetime_values") is False
), "datetime_values attribute present with datetime_columns set to None"
def test_compulsory_checks_generated_with_no_optional_calls_fit(self):
"""Test null_map and column_classes are present after fit when optional parameters set to None"""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
x.fit(df)
assert (
hasattr(x, "null_map") is True
), "null_map attribute not present when optional checks set to None"
assert (
hasattr(x, "column_classes") is True
), "column_classes attribute not present when optional checks set to None"
def test_all_checks_generated(self):
"""Test all checks are generated when all optional parameters set"""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
assert (
hasattr(x, "numerical_values") is True
), "numerical_values attribute not present after fit with numerical_columns set"
assert (
hasattr(x, "expected_values") is True
), "expected_values attribute not present after fit with categorical_columns set"
assert (
hasattr(x, "datetime_values") is True
), "expected_values attribute not present after fit with datetime_columns set"
assert (
hasattr(x, "null_map") is True
), "null_map attribute not present after fit"
assert (
hasattr(x, "column_classes") is True
), "column_classes attribute not present after fit"
def test_check_df_is_empty_called(self, mocker):
"""Test check is df empty is called by the fit method."""
x = InputChecker(
columns=["a", "b", "c"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
)
df = data_generators_p.create_df_2()
spy = mocker.spy(input_checker.checker.InputChecker, "_df_is_empty")
x.fit(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._df_is_empty with fit"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
expected_pos_args_0 = (x, "input dataframe", df)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in _df_is_empty call for dataframe argument"
class TestTransformTypeChecker(object):
"""Tests for InputChecker._transform_type_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_type_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_type_checker,
expected_arguments=["self", "X", "batch_mode"],
expected_default_values=(False,),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["column_classes"],), "kwargs": {}}}
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_type_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_type_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df)
assert isinstance(
type_checker_failed_checks, dict
), f"incorrect type results type identified - expected: dict but got: {type(type_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_type_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_passes_column_all_nulls(self):
"""Test _transform_type_checker passes all the checks on the training dataframe when a column contains only nulls"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
df["c"] = np.nan
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_type_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
exp_type = df["a"].dtypes
df.loc[5, "a"] = "a"
type_checker_failed_checks = x._transform_type_checker(df)
assert (
type_checker_failed_checks["a"]["actual"] == df["a"].dtypes
), f"incorrect values saved to type_checker_failed_checks bad types - expected: [{type('a')}] but got: {type_checker_failed_checks['a']['types']}"
assert (
type_checker_failed_checks["a"]["expected"] == exp_type
), f"incorrect values saved to type_checker_failed_checks expected types - expected: [{exp_type}] but got: {type_checker_failed_checks['a']['types']}"
def test_transform_passes_batch_mode(self):
"""Test _transform_type_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
type_checker_failed_checks = x._transform_type_checker(df, batch_mode=True)
assert (
type_checker_failed_checks == {}
), f"Type checker found failed tests - {list(type_checker_failed_checks.keys())}"
def test_transform_captures_failed_test_batch_mode(self):
"""Test _transform_type_checker handles mixed types"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
print(df)
x = InputChecker()
x.fit(df)
exp_type = df["a"].dtypes
print(exp_type)
df.loc[5, "a"] = "a"
df.loc[1, "d"] = "a"
df.loc[3, "b"] = 1
type_checker_failed_checks = x._transform_type_checker(df, batch_mode=True)
expected_output = {
"a": {"idxs": [5], "actual": {5: "str"}, "expected": "float"},
"b": {"idxs": [3], "actual": {3: "int"}, "expected": "str"},
"d": {"idxs": [1], "actual": {1: "str"}, "expected": "Timestamp"},
}
for k, v in expected_output.items():
assert (
k in type_checker_failed_checks.keys()
), f"expected column {k} in type_checker_failed_checks output"
assert (
type(type_checker_failed_checks[k]) == dict
), f"expected dict for column {k} in type_checker_failed_checks output"
for sub_k, sub_v in expected_output[k].items():
assert (
sub_k in type_checker_failed_checks[k].keys()
), f"expected {sub_k} as dict key in type_checker_failed_checks output"
assert (
sub_v == type_checker_failed_checks[k][sub_k]
), f"expected {sub_v} as value for {sub_k} in column {k} output of type_checker_failed_checks output"
class TestTransformNullChecker(object):
"""Tests for InputChecker._transform_null_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_null_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_null_checker, expected_arguments=["self", "X"]
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["null_map"],), "kwargs": {}}}
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_null_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_null_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
null_checker_failed_checks = x._transform_null_checker(df)
assert isinstance(
null_checker_failed_checks, dict
), f"incorrect null results type identified - expected: dict but got: {type(null_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_null_checker passes all the checks on the training dataframe"""
df = data_generators_p.create_df_2()
df["b"] = df["b"].fillna("a")
x = InputChecker()
x.fit(df)
null_checker_failed_checks = x._transform_null_checker(df)
assert (
null_checker_failed_checks == {}
), f"Null checker found failed tests - {list(null_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_null_checker captures a failed check"""
df = data_generators_p.create_df_2()
df["b"] = df["b"].fillna("a")
x = InputChecker()
x.fit(df)
df.loc[5, "b"] = np.nan
null_checker_failed_checks = x._transform_null_checker(df)
assert null_checker_failed_checks["b"] == [
5
], f"incorrect values saved to value_checker_failed_checks - expected: [5] but got: {null_checker_failed_checks['b']}"
class TestTransformNumericalChecker(object):
"""Tests for InputChecker._transform_numerical_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_numerical_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_numerical_checker,
expected_arguments=["self", "X", "type_fails", "batch_mode"],
expected_default_values=(
{},
False,
),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["numerical_values"],), "kwargs": {}}}
x = InputChecker(numerical_columns=["a"])
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_numerical_checker(df, {})
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_numerical_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert isinstance(
numerical_checker_failed_checks, dict
), f"incorrect numerical results type identified - expected: dict but got: {type(numerical_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_numerical_checker passes all the numerical checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert (
numerical_checker_failed_checks == {}
), f"Numerical checker found failed tests - {list(numerical_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_numerical_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
expected_max = {5: 7.0}
expected_min = {0: -1.0}
assert (
numerical_checker_failed_checks["a"]["maximum"] == expected_max
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_max} but got: {numerical_checker_failed_checks['a']['maximum']}"
assert (
numerical_checker_failed_checks["a"]["minimum"] == expected_min
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_min} but got: {numerical_checker_failed_checks['a']['minimum']}"
def test_transform_captures_failed_test_only_maximum(self):
"""Test _transform_numerical_checker captures a failed check when the check includes a maximum value but no minimum value"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = True
numerical_dict["a"]["minimum"] = False
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
expected_max = {5: 7.0}
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
assert (
numerical_checker_failed_checks["a"]["maximum"] == expected_max
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_max} but got: {numerical_checker_failed_checks['a']['maximum']}"
assert (
"minimum" not in numerical_checker_failed_checks["a"]
), "No minimum value results expected given input the numerical dict"
def test_transform_captures_failed_test_only_minimum(self):
"""Test _transform_numerical_checker captures a failed check when the check includes a minimum value but no maximum value"""
df = data_generators_p.create_df_2()
numerical_dict = {}
numerical_dict["a"] = {}
numerical_dict["a"]["maximum"] = False
numerical_dict["a"]["minimum"] = True
x = InputChecker(numerical_columns=numerical_dict)
x.fit(df)
df.loc[0, "a"] = -1
df.loc[5, "a"] = 7
numerical_checker_failed_checks = x._transform_numerical_checker(df, {})
expected_min = {0: -1.0}
assert (
numerical_checker_failed_checks["a"]["minimum"] == expected_min
), f"incorrect values saved to numerical_checker_failed_checks - expected: {expected_min} but got: {numerical_checker_failed_checks['a']['minimum']}"
assert (
"maximum" not in numerical_checker_failed_checks["a"]
), "No maximum value results expected given input the numerical dict"
def test_transform_skips_failed_type_checks_batch_mode(self):
"""Test _transform_numerical_checker skips checks for rows which aren't numerical
when operating in batch mode"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
df.loc[4, "a"] = "z"
df.loc[1, "a"] = 1
df.loc[2, "a"] = 100
type_fails_dict = {
"a": {"idxs": [1, 4], "actual": {1: "int", 4: "str"}, "expected": "float"}
}
expected_output = {"a": {"max idxs": [2], "maximum": {2: 100}}}
numerical_checker_failed_checks = x._transform_numerical_checker(
df, type_fails_dict, batch_mode=True
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks,
expected=expected_output,
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
def test_transform_skips_failed_type_checks(self):
"""Test _transform_numerical_checker skips checks for columns which aren't numerical
when not operating in batch mode"""
df = data_generators_p.create_df_2()
x = InputChecker(numerical_columns=["a"])
x.fit(df)
# Case 1: check will not be performed as column a is not numerical
df_test = pd.DataFrame({"a": ["z", "zz", "zzz"]})
type_fails_dict = {
"a": {"actual": df_test["a"].dtypes, "expected": df["a"].dtypes}
}
numerical_checker_failed_checks = x._transform_numerical_checker(
df_test, type_fails_dict, batch_mode=False
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks,
expected={},
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
# Case 2: column a should still get checked because even though type does not match,
# int != float the column is still numerical
df_test2 = pd.DataFrame({"a": [5, 3, 222]})
type_fails_dict2 = {
"a": {"actual": df_test2["a"].dtypes, "expected": df["a"].dtypes}
}
numerical_checker_failed_checks2 = x._transform_numerical_checker(
df_test2, type_fails_dict2, batch_mode=False
)
h.assert_equal_dispatch(
actual=numerical_checker_failed_checks2,
expected={"a": {"max idxs": [2], "maximum": {2: 222}}},
msg="rows failing type check have not been removed by _transform_numerical_checker",
)
class TestTransformValueChecker(object):
"""Tests for InputChecker._transform_value_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_value_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_value_checker, expected_arguments=["self", "X"]
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["expected_values"],), "kwargs": {}}}
x = InputChecker(categorical_columns=["b", "c"])
df = data_generators_p.create_df_2()
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_value_checker(df)
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_value_checker returns results dictionary"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
value_checker_failed_checks = x._transform_value_checker(df)
assert isinstance(
value_checker_failed_checks, dict
), f"incorrect numerical results type identified - expected: dict but got: {type(value_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_value_checker passes all the categorical checks on the training dataframe"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
value_checker_failed_checks = x._transform_value_checker(df)
assert (
value_checker_failed_checks == {}
), f"Categorical checker found failed tests - {list(value_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_value_checker captures a failed check"""
df = data_generators_p.create_df_2()
x = InputChecker(categorical_columns=["b", "c"])
x.fit(df)
df.loc[5, "b"] = "u"
value_checker_failed_checks = x._transform_value_checker(df)
assert value_checker_failed_checks["b"]["values"] == [
"u"
], f"incorrect values saved to value_checker_failed_checks - expected: ['u'] but got: {value_checker_failed_checks['b']['values']}"
assert value_checker_failed_checks["b"]["idxs"] == [
5
], f"incorrect values saved to value_checker_failed_checks - expected: [5] but got: {value_checker_failed_checks['b']['idxs']}"
class TestTransformDatetimeChecker(object):
"""Tests for InputChecker._transform_datetime_checker()."""
def test_arguments(self):
"""Test that InputChecker _transform_datetime_checker has expected arguments."""
h.test_function_arguments(
func=InputChecker._transform_datetime_checker,
expected_arguments=["self", "X", "type_fails", "batch_mode"],
expected_default_values=(
{},
False,
),
)
def test_check_fitted_called(self, mocker):
"""Test that transform calls BaseTransformer.check_is_fitted."""
expected_call_args = {0: {"args": (["datetime_values"],), "kwargs": {}}}
x = InputChecker(datetime_columns=["d"])
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.NAN,
]
)
x.fit(df)
with h.assert_function_call(
mocker, tubular.base.BaseTransformer, "check_is_fitted", expected_call_args
):
x._transform_datetime_checker(df, {})
def test_transform_returns_failed_checks_dict(self):
"""Test _transform_datetime_checker returns results dictionary"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.NAN,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
assert isinstance(
datetime_checker_failed_checks, dict
), f"incorrect datetime results type identified - expected: dict but got: {type(datetime_checker_failed_checks)}"
def test_transform_passes(self):
"""Test _transform_datetime_checker passes all the numerical checks on the training dataframe"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.NAN,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
assert (
datetime_checker_failed_checks == {}
), f"Datetime checker found failed tests - {list(datetime_checker_failed_checks.keys())}"
def test_transform_captures_failed_test(self):
"""Test _transform_datetime_checker captures a failed check"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.NAN,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
outliers_1 = pd.to_datetime("15/09/2017", utc=False)
outliers_2 = pd.to_datetime("13/09/2017", utc=False)
df.loc[0, "d"] = outliers_1
df.loc[1, "d"] = outliers_2
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
results = datetime_checker_failed_checks["d"]["minimum"]
assert results[0] == outliers_1, (
f"incorrect values saved to datetime_checker_failed_checks - "
f"expected: {outliers_1} but got: {results[0]} "
)
assert results[1] == outliers_2, (
f"incorrect values saved to datetime_checker_failed_checks - "
f"expected: {outliers_2} but got: {results[1]} "
)
def test_transform_captures_failed_test_both_minimum_and_maximum(self):
"""Test _transform_datetime_checker captures a failed check when the check includes a maximum value and a
minimum value"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
datetime_dict = {"d": {"maximum": True, "minimum": True}}
x = InputChecker(datetime_columns=datetime_dict)
x.fit(df)
lower_outliers = pd.to_datetime("15/09/2017", utc=False)
upper_outliers = pd.to_datetime("20/01/2021", utc=False)
df.loc[0, "d"] = lower_outliers
df.loc[5, "d"] = upper_outliers
datetime_checker_failed_checks = x._transform_datetime_checker(df, {})
expected_min = {0: lower_outliers}
expected_max = {5: upper_outliers}
assert datetime_checker_failed_checks["d"]["maximum"] == expected_max, (
f"incorrect values saved to "
f"datetime_checker_failed_checks - "
f"expected: {expected_max} but got: "
f"{datetime_checker_failed_checks['d']['maximum']} "
)
assert datetime_checker_failed_checks["d"]["minimum"] == expected_min, (
f"incorrect values saved to "
f"datetime_checker_failed_checks - "
f"expected: {expected_min} but got: "
f"{datetime_checker_failed_checks['d']['minimum']} "
)
def test_transform_skips_failed_type_checks_batch_mode(self):
"""Test _transform_datetime_checker skips checks for rows which aren't datetime type
when operating in batch mode"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
df.loc[3, "d"] = 1
df.loc[4, "d"] = "z"
df.loc[5, "d"] = pd.to_datetime("20/09/2011", utc=False)
type_fails_dict = {
"d": {
"idxs": [3, 4],
"actual": {3: "int", 4: "str"},
"expected": "Timestamp",
}
}
datetime_checker_failed_checks = x._transform_datetime_checker(
df, type_fails_dict, batch_mode=True
)
h.assert_equal_dispatch(
actual=datetime_checker_failed_checks,
expected={
"d": {
"minimum": {5: pd.to_datetime("20/09/2011", utc=False)},
"min idxs": [5],
}
},
msg="rows failing type check have not been removed by _transform_datetime_checker",
)
def test_transform_skips_failed_type_checks(self):
"""Test _transform_datetime_checker skips checks for columns which aren't datetime
when not operating in batch mode"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
df_test = pd.DataFrame({"d": ["z", "zz", "zzz"]})
type_fails_dict = {
"d": {"actual": df_test["d"].dtypes, "expected": df["d"].dtypes}
}
datetime_checker_failed_checks = x._transform_datetime_checker(
df_test, type_fails_dict, batch_mode=False
)
h.assert_equal_dispatch(
actual=datetime_checker_failed_checks,
expected={},
msg="rows failing type check have not been removed by _transform_datetime_checker",
)
class TestTransform(object):
"""Tests for InputChecker.transform()."""
def test_arguments(self):
"""Test that transform has expected arguments."""
h.test_function_arguments(
func=InputChecker.transform,
expected_arguments=["self", "X", "batch_mode"],
expected_default_values=(False,),
)
def test_super_transform_called(self, mocker):
"""Test super transform is called by the transform method."""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy = mocker.spy(tubular.base.BaseTransformer, "transform")
df = x.transform(df)
assert (
spy.call_count == 1
), "unexpected number of calls to tubular.base.BaseTransformer.transform with transform"
def test_transform_returns_df(self):
"""Test fit returns df"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker()
x.fit(df)
df_transformed = x.transform(df)
assert df_transformed.equals(
df
), "Returned value from InputChecker.transform not as expected."
def test_batch_mode_transform_returns_df(self):
"""Test fit returns df"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x = InputChecker()
x.fit(df)
df_transformed, bad_df = x.transform(df, batch_mode=True)
assert df_transformed.equals(
df
), "Returned value from InputChecker.transform not as expected."
h.assert_equal_dispatch(
expected=df,
actual=df_transformed,
msg="Returned df of passed rows from InputChecker.transform not as expected.",
)
h.assert_equal_dispatch(
expected=pd.DataFrame(
columns=df.columns.values.tolist() + ["failed_checks"]
),
actual=bad_df,
msg="Returned df of failed rows from InputChecker.transform not as expected.",
)
def test_check_df_is_empty_called(self, mocker):
"""Test check is df empty is called by the transform method."""
x = InputChecker(
columns=["a", "b", "c", "d"],
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy = mocker.spy(input_checker.checker.InputChecker, "_df_is_empty")
df = x.transform(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker._df_is_empty with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
expected_pos_args_0 = (x, "scoring dataframe", df)
h.assert_equal_dispatch(
expected=expected_pos_args_0,
actual=call_0_pos_args,
msg="positional args unexpected in _df_is_empty call for scoring dataframe argument",
)
def test_non_optional_transforms_always_called(self, mocker):
"""Test non-optional checks are called by the transform method irrespective of categorical_columns,
numerical_columns & datetime_columns values."""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy_null = mocker.spy(
input_checker.checker.InputChecker, "_transform_null_checker"
)
spy_type = mocker.spy(
input_checker.checker.InputChecker, "_transform_type_checker"
)
df = x.transform(df)
assert spy_null.call_count == 1, (
"unexpected number of calls to _transform_null_checker with transform when numerical_columns and "
"categorical_columns set to None "
)
assert spy_type.call_count == 1, (
"unexpected number of calls to _transform_type_checker with transform when numerical_columns and "
"categorical_columns set to None "
)
def test_optional_transforms_not_called(self, mocker):
"""Test optional checks are not called by the transform method."""
x = InputChecker(
numerical_columns=None, categorical_columns=None, datetime_columns=None
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy_numerical = mocker.spy(
input_checker.checker.InputChecker, "_transform_numerical_checker"
)
spy_categorical = mocker.spy(
input_checker.checker.InputChecker, "_transform_value_checker"
)
spy_datetime = mocker.spy(
input_checker.checker.InputChecker, "_transform_datetime_checker"
)
df = x.transform(df)
assert (
spy_numerical.call_count == 0
), "unexpected number of calls to _transform_numerical_checker with transform when numerical_columns set to None"
assert (
spy_categorical.call_count == 0
), "unexpected number of calls to _transform_value_checker with transform when categorical_columns set to None"
assert (
spy_datetime.call_count == 0
), "unexpected number of calls to _transform_datetime_checker with transform when datetime_columns set to None"
def test_raise_exception_if_checks_fail_called_no_optionals(self, mocker):
"""Test raise exception is called by the transform method when categorical, numerical_& datetime columns set
to None."""
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
spy = mocker.spy(
input_checker.checker.InputChecker, "raise_exception_if_checks_fail"
)
df = x.transform(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker.raise_exception_if_checks_fail with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
value_failed_checks = {}
numerical_failed_checks = {}
datetime_failed_checks = {}
type_failed_checks = x._transform_type_checker(df)
null_failed_checks = x._transform_null_checker(df)
expected_pos_args_0 = (
x,
type_failed_checks,
null_failed_checks,
value_failed_checks,
numerical_failed_checks,
datetime_failed_checks,
)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in raise_exception_if_checks_fail call in transform method"
def test_raise_exception_if_checks_fail_called_all_checks(self, mocker):
"""Test raise exception is called by the transform method when categorical_columns and numerical_columns set
to None."""
x = InputChecker(
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
x.fit(df)
spy = mocker.spy(
input_checker.checker.InputChecker, "raise_exception_if_checks_fail"
)
df = x.transform(df)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker.raise_exception_if_checks_fail with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
value_failed_checks = x._transform_value_checker(df)
numerical_failed_checks = x._transform_numerical_checker(df)
datetime_failed_checks = x._transform_datetime_checker(df)
type_failed_checks = x._transform_type_checker(df)
null_failed_checks = x._transform_null_checker(df)
expected_pos_args_0 = (
x,
type_failed_checks,
null_failed_checks,
value_failed_checks,
numerical_failed_checks,
datetime_failed_checks,
)
assert (
expected_pos_args_0 == call_0_pos_args
), "positional args unexpected in raise_exception_if_checks_fail call in transform method"
def test_separate_passes_and_fails_called_no_optionals(self, mocker):
"""Test raise exception is called by the transform method when categorical, numerical_& datetime columns set
to None."""
x = InputChecker()
df = data_generators_p.create_df_2()
orig_df = df.copy(deep=True)
x.fit(df)
spy = mocker.spy(
input_checker.checker.InputChecker, "separate_passes_and_fails"
)
df, bad_df = x.transform(df, batch_mode=True)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker.separate_passes_and_fails with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
value_failed_checks = {}
numerical_failed_checks = {}
datetime_failed_checks = {}
type_failed_checks = x._transform_type_checker(df)
null_failed_checks = x._transform_null_checker(df)
expected_pos_args_0 = (
x,
type_failed_checks,
null_failed_checks,
value_failed_checks,
numerical_failed_checks,
datetime_failed_checks,
orig_df,
)
h.assert_equal_dispatch(
expected=expected_pos_args_0,
actual=call_0_pos_args,
msg="positional args unexpected in separate_passes_and_fails call in transform method",
)
def test_separate_passes_and_fails_called_all_checks(self, mocker):
"""Test raise exception is called by the transform method when categorical_columns and numerical_columns set
to None."""
x = InputChecker(
numerical_columns=["a"],
categorical_columns=["b", "c"],
datetime_columns=["d"],
)
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
"24/07/2020",
]
)
orig_df = df.copy(deep=True)
x.fit(df)
spy = mocker.spy(
input_checker.checker.InputChecker, "separate_passes_and_fails"
)
df, bad_df = x.transform(df, batch_mode=True)
assert (
spy.call_count == 1
), "unexpected number of calls to InputChecker.separate_passes_and_fails with transform"
call_0_args = spy.call_args_list[0]
call_0_pos_args = call_0_args[0]
value_failed_checks = x._transform_value_checker(df)
numerical_failed_checks = x._transform_numerical_checker(df)
datetime_failed_checks = x._transform_datetime_checker(df)
type_failed_checks = x._transform_type_checker(df)
null_failed_checks = x._transform_null_checker(df)
expected_pos_args_0 = (
x,
type_failed_checks,
null_failed_checks,
value_failed_checks,
numerical_failed_checks,
datetime_failed_checks,
orig_df,
)
h.assert_equal_dispatch(
expected=expected_pos_args_0,
actual=call_0_pos_args,
msg="positional args unexpected in separate_passes_and_fails call in transform method",
)
class TestRaiseExceptionIfChecksFail(object):
"""Tests for InputChecker.raise_exception_if_checks_fail()."""
def test_arguments(self):
"""Test that raise_exception_if_checks_fail has expected arguments."""
h.test_function_arguments(
func=InputChecker.raise_exception_if_checks_fail,
expected_arguments=[
"self",
"type_failed_checks",
"null_failed_checks",
"value_failed_checks",
"numerical_failed_checks",
"datetime_failed_checks",
],
expected_default_values=None,
)
def test_no_failed_checks_before_transform(self):
"""Test validation_failed_checks is not present before transform"""
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
assert (
hasattr(x, "validation_failed_checks") is False
), "validation_failed_checks attribute present before transform"
def test_validation_failed_checks_saved(self):
"""Test raise_exception_if_checks_fail saves the validation results"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
df = x.transform(df)
assert (
hasattr(x, "validation_failed_checks") is True
), "validation_failed_checks attribute not present after transform"
assert isinstance(
x.validation_failed_checks, dict
), f"incorrect validation results type identified - expected: dict but got: {type(x.validation_failed_checks)}"
def test_correct_validation_failed_checks(self):
"""Test raise_exception_if_checks_fail saves and prints the correct error message"""
df = data_generators_p.create_df_2()
x = InputChecker()
x.fit(df)
df = x.transform(df)
assert isinstance(
x.validation_failed_checks["Failed type checks"], dict
), f"incorrect type validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed type checks'])}"
assert isinstance(
x.validation_failed_checks["Failed null checks"], dict
), f"incorrect null validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed null checks'])}"
assert isinstance(
x.validation_failed_checks["Failed categorical checks"], dict
), f"incorrect categorical validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed categorical checks'])}"
assert isinstance(
x.validation_failed_checks["Failed numerical checks"], dict
), f"incorrect numerical validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed numerical checks'])}"
assert isinstance(
x.validation_failed_checks["Failed datetime checks"], dict
), f"incorrect datetime validation results type identified - expected: dict but got: {type(x.validation_failed_checks['Failed datetime checks'])}"
assert isinstance(
x.validation_failed_checks["Exception message"], str
), f"incorrect exception message type identified - expected: str but got: {type(x.validation_failed_checks['Exception message'])}"
def test_input_checker_error_raised_type(self):
"""Test InputCheckerError is raised if type test fails"""
x = InputChecker()
df = data_generators_p.create_df_2()
x.fit(df)
df.loc[5, "a"] = "a"
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_input_checker_error_raised_nulls(self):
"""Test InputCheckerError is raised if null test fails"""
x = InputChecker()
df = data_generators_p.create_df_2()
df["b"] = df["b"].fillna("a")
x = InputChecker()
x.fit(df)
df.loc[5, "b"] = np.nan
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_input_checker_error_raised_categorical(self):
"""Test InputCheckerError is raised if categorical test fails"""
x = InputChecker(categorical_columns=["b"])
df = data_generators_p.create_df_2()
x.fit(df)
df.loc[5, "b"] = "u"
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_input_checker_error_raised_numerical(self):
"""Test InputCheckerError is raised if numerical test fails"""
x = InputChecker(numerical_columns=["a"])
df = data_generators_p.create_df_2()
x.fit(df)
df.loc[0, "a"] = -1
with pytest.raises(InputCheckerError):
df = x.transform(df)
def test_input_checker_error_raised_datetime(self):
"""Test InputCheckerError is raised if datetime test fails"""
df = data_generators_p.create_df_2()
df["d"] = pd.to_datetime(
[
"01/02/2020",
"01/02/2021",
"08/04/2019",
"01/03/2020",
"29/03/2019",
"15/10/2018",
np.NAN,
]
)
x = InputChecker(datetime_columns=["d"])
x.fit(df)
        outliers_1 = pd.to_datetime("15/09/2017")
# This is a test file intended to be used with pytest
# pytest automatically runs all the functions starting with "test_"
# see https://docs.pytest.org for more information
import math
import os
import sys
import numpy as np
import pandas as pd
## Add stuff to the path to enable exec outside of DSS
plugin_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.join(plugin_root, 'python-lib'))
import dku_timeseries
JUST_BEFORE_SPRING_DST = pd.Timestamp('20190131 01:59:00')
import pandas as pd
import numpy as np
##This file takes results_hyperParams (the output of aucroc_aucpr) as input, retrieves the maxima of the AUCROCmax/AUCPRmax/AUCROClast/AUCPRlast values and obtains the corresponding hyperparameters
##initialize Pandas DataFrame
values_df = pd.DataFrame(columns=['hiddenSizes', 'lastDropout', 'weightDecay', 'AUCROC_max', 'AUCPR_max', 'AUCROC_last', 'AUCPR_last'])
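## Illustrative sketch only (not part of the original script, which is truncated at this point in
## the dump): assuming the results are available as a CSV with the columns above (the file name
## below is a placeholder), the best hyperparameters per metric could be looked up with idxmax,
## which returns the row label of the maximum value in a column.
def _example_best_hyperparams(path="results_hyperParams.csv"):
    results = pd.read_csv(path)
    best = {}
    for metric in ['AUCROC_max', 'AUCPR_max', 'AUCROC_last', 'AUCPR_last']:
        # keep the hyperparameters of the row where this metric is maximal
        best[metric] = results.loc[results[metric].idxmax(),
                                   ['hiddenSizes', 'lastDropout', 'weightDecay', metric]]
    return best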
#!/bin/env python
import boto3
import os
from datetime import datetime, timedelta
from boto3.dynamodb.conditions import Key
from botocore.exceptions import ClientError
from dash.app import stock_cache
from dash.stock import Stock
from dash.userInfo import UserInfo
from decimal import Decimal
import numpy as np
import pandas as pd
dynamodb = boto3.resource('dynamodb')
watch_list_table = dynamodb.Table('UserWatchList')
def lambda_handler(event, context):
watch_list = get_watch_list()
for item in watch_list:
print(item['user_name'] + ' ' + item['company'])
stock = stock_cache.get(item['company'], Stock(item['company']))
watch_list_item = {
'projection_years': stock.n_projection_years,
'excpected_eps': round(stock.estimated_growth * 100.,2),
'target_yield': round(stock.target_yield * 100.,2),
'target_p_e': round(stock.target_pe,1),
'expected_dividends': round(stock.expected_dividends,2),
'expected_dividends_growth': round(stock.projected_dividends_growth * 100. ,2),
#'projected_price': projected_price,
#'projected_total_dividends': projected_total_dividends,
#'target_price': target_price,
#'current_price': current_target_price,
'alert_price': round(stock.target_price,2)
}
try:
userInfo = UserInfo(item['user_name'])
            if 'alert_price' in item and not pd.isna(stock.current_price):
import numpy as np
import pandas as pd
import os,re
import multiprocessing
import h5py
import csv
import ujson
from operator import itemgetter
from collections import defaultdict
from io import StringIO
from . import helper
from ..utils import misc
def index(eventalign_result,pos_start,out_paths,locks):
eventalign_result = eventalign_result.set_index(['contig','read_index'])
pos_end=pos_start
with locks['index'], open(out_paths['index'],'a') as f_index:
for index in list(dict.fromkeys(eventalign_result.index)):
transcript_id,read_index = index
pos_end += eventalign_result.loc[index]['line_length'].sum()
try: # sometimes read_index is nan
f_index.write('%s,%d,%d,%d\n' %(transcript_id,read_index,pos_start,pos_end))
except:
pass
pos_start = pos_end
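# Illustrative sketch (not part of the original module): the (pos_start, pos_end) byte offsets
# written by index() above make it possible to read back the eventalign rows of a single
# (transcript, read) pair without scanning the whole file, assuming one index row per pair.
def _example_fetch_eventalign_rows(eventalign_filepath, index_filepath, transcript_id, read_index):
    df_index = pd.read_csv(index_filepath).set_index(['transcript_id', 'read_index'])
    pos_start, pos_end = df_index.loc[(transcript_id, read_index), ['pos_start', 'pos_end']]
    with open(eventalign_filepath, 'r') as f:
        f.seek(int(pos_start))
        # raw tab-separated eventalign lines belonging to this read
        return f.read(int(pos_end - pos_start))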
def parallel_index(eventalign_filepath,chunk_size,out_dir,n_processes,resume):
# Create output paths and locks.
out_paths,locks = dict(),dict()
for out_filetype in ['index']:
out_paths[out_filetype] = os.path.join(out_dir,'eventalign.%s' %out_filetype)
locks[out_filetype] = multiprocessing.Lock()
# read_names_done = []
# if resume and os.path.exists(out_paths['log']):
# read_names_done = [line.rstrip('\n') for line in open(out_paths['log'],'r')]
# else:
# Create empty files.
with open(out_paths['index'],'w') as f:
f.write('transcript_id,read_index,pos_start,pos_end\n') # header
# Create communication queues.
task_queue = multiprocessing.JoinableQueue(maxsize=n_processes * 2)
# Create and start consumers.
consumers = [helper.Consumer(task_queue=task_queue,task_function=index,locks=locks) for i in range(n_processes)]
for p in consumers:
p.start()
## Load tasks into task_queue. A task is eventalign information of one read.
eventalign_file = open(eventalign_filepath,'r')
pos_start = len(eventalign_file.readline()) #remove header
chunk_split = None
index_features = ['contig','read_index','line_length']
for chunk in pd.read_csv(eventalign_filepath, chunksize=chunk_size,sep='\t'):
chunk_complete = chunk[chunk['read_index'] != chunk.iloc[-1]['read_index']]
chunk_concat = pd.concat([chunk_split,chunk_complete])
chunk_concat_size = len(chunk_concat.index)
## read the file at where it left off because the file is opened once ##
lines = [len(eventalign_file.readline()) for i in range(chunk_concat_size)]
chunk_concat['line_length'] = np.array(lines)
task_queue.put((chunk_concat[index_features],pos_start,out_paths))
pos_start += sum(lines)
chunk_split = chunk[chunk['read_index'] == chunk.iloc[-1]['read_index']]
## the loop above leaves off w/o adding the last read_index to eventalign.index
chunk_split_size = len(chunk_split.index)
lines = [len(eventalign_file.readline()) for i in range(chunk_split_size)]
chunk_split['line_length'] = np.array(lines)
task_queue.put((chunk_split[index_features],pos_start,out_paths))
# Put the stop task into task_queue.
task_queue = helper.end_queue(task_queue,n_processes)
# Wait for all of the tasks to finish.
task_queue.join()
def t2g(gene_id,fasta_dict,gtf_dict,g2t_mapping,df_eventalign_index,readcount_min):
tx_ids = []
t2g_dict = {}
transcripts = [tx for tx in gtf_dict if tx in g2t_mapping[gene_id]]
n_reads = sum([len(df_eventalign_index.loc[tx]) for tx in transcripts])
if n_reads >= readcount_min:
for tx in transcripts:
tx_seq = fasta_dict[tx]
tx_contig = gtf_dict[tx]['chr']
if tx_seq is None:
continue
for exon_num in range(len(gtf_dict[tx]['exon'])):
g_interval=gtf_dict[tx]['exon'][exon_num]
tx_interval=gtf_dict[tx]['tx_exon'][exon_num]
for g_pos in range(g_interval[0],g_interval[1]+1): # Exclude the rims of exons.
dis_from_start = g_pos - g_interval[0]
if gtf_dict[tx]['strand'] == "+":
tx_pos = tx_interval[0] + dis_from_start
elif gtf_dict[tx]['strand'] == "-":
tx_pos = tx_interval[1] - dis_from_start
if (g_interval[0] <= g_pos < g_interval[0]+2) or (g_interval[1]-2 < g_pos <= g_interval[1]): # Todo: To improve the mapping
kmer = 'XXXXX'
else:
kmer = tx_seq[tx_pos-2:tx_pos+3]
t2g_dict[(tx,tx_pos)] = (tx_contig,gene_id,g_pos,kmer) # tx.contig is chromosome.
tx_ids += [tx]
return n_reads, tx_ids, t2g_dict
def combine(events_str):
f_string = StringIO(events_str)
eventalign_result = pd.read_csv(f_string,delimiter='\t',names=['contig','position','reference_kmer','read_index',
'strand','event_index','event_level_mean','event_stdv','event_length','model_kmer',
'model_mean', 'model_stdv', 'standardized_level', 'start_idx', 'end_idx'])
f_string.close()
cond_successfully_eventaligned = eventalign_result['reference_kmer'] == eventalign_result['model_kmer']
if cond_successfully_eventaligned.sum() != 0:
eventalign_result = eventalign_result[cond_successfully_eventaligned]
keys = ['read_index','contig','position','reference_kmer'] # for groupby
eventalign_result['length'] = pd.to_numeric(eventalign_result['end_idx'])-pd.to_numeric(eventalign_result['start_idx'])
eventalign_result['sum_norm_mean'] = pd.to_numeric(eventalign_result['event_level_mean']) * eventalign_result['length']
eventalign_result = eventalign_result.groupby(keys)
sum_norm_mean = eventalign_result['sum_norm_mean'].sum()
start_idx = eventalign_result['start_idx'].min()
end_idx = eventalign_result['end_idx'].max()
total_length = eventalign_result['length'].sum()
eventalign_result = pd.concat([start_idx,end_idx],axis=1)
eventalign_result['norm_mean'] = (sum_norm_mean/total_length).round(1)
eventalign_result.reset_index(inplace=True)
# eventalign_result['transcript_id'] = [contig.split('.')[0] for contig in eventalign_result['contig']] #### CHANGE MADE ####
eventalign_result['transcript_id'] = [contig for contig in eventalign_result['contig']]
#eventalign_result['transcript_id'] = eventalign_result['contig']
eventalign_result['transcriptomic_position'] = pd.to_numeric(eventalign_result['position']) + 2 # the middle position of 5-mers.
# eventalign_result = misc.str_encode(eventalign_result)
# eventalign_result['read_id'] = [read_name]*len(eventalign_result)
# features = ['read_id','transcript_id','transcriptomic_position','reference_kmer','norm_mean','start_idx','end_idx']
# features_dtype = np.dtype([('read_id', 'S36'), ('transcript_id', 'S15'), ('transcriptomic_position', '<i8'), ('reference_kmer', 'S5'), ('norm_mean', '<f8'), ('start_idx', '<i8'), ('end_idx', '<i8')])
# features = ['transcript_id','transcriptomic_position','reference_kmer','norm_mean']
# df_events = eventalign_result[['read_index']+features]
# # print(df_events.head())
features = ['transcript_id','transcriptomic_position','reference_kmer','norm_mean']
# np_events = eventalign_result[features].reset_index().values.ravel().view(dtype=[('transcript_id', 'S15'), ('transcriptomic_position', '<i8'), ('reference_kmer', 'S5'), ('norm_mean', '<f8')])
df_events = eventalign_result[features]
np_events = np.rec.fromrecords(df_events, names=[*df_events])
return np_events
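# Worked sketch of the length-weighted mean that combine() computes per (read, position, kmer)
# (illustrative numbers only): two events with means 100.0 (3 samples long) and 110.0 (1 sample
# long) collapse to norm_mean = (100.0*3 + 110.0*1) / (3 + 1) = 102.5, with start_idx = min and
# end_idx = max of the merged events.
def _example_weighted_norm_mean():
    events = pd.DataFrame({'event_level_mean': [100.0, 110.0],
                           'start_idx': [10, 13],
                           'end_idx': [13, 14]})
    length = events['end_idx'] - events['start_idx']
    return round((events['event_level_mean'] * length).sum() / length.sum(), 1)  # 102.5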
def readFasta(transcript_fasta_paths_or_urls):
fasta=open(transcript_fasta_paths_or_urls,"r")
entries=""
for ln in fasta:
entries+=ln
entries=entries.split(">")
dict={}
for entry in entries:
entry=entry.split("\n")
# id=entry[0].split(".")[0]
if len(entry[0].split()) > 0:
id=entry[0].split()[0]
seq="".join(entry[1:])
dict[id]=seq
return dict
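# Minimal usage sketch for readFasta (the FASTA content below is made up for illustration):
# each ">" header keeps only the first whitespace-delimited token as the id, and the remaining
# lines of the entry are joined into a single sequence string.
def _example_readFasta(path="example.fa"):
    with open(path, "w") as f:
        f.write(">tx1 some description\nACGT\nACGT\n>tx2\nGGGG\n")
    return readFasta(path)  # {'tx1': 'ACGTACGT', 'tx2': 'GGGG'}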
def readGTF(gtf_path_or_url):
gtf=open(gtf_path_or_url,"r")
dict={}
for ln in gtf:
if not ln.startswith("#"):
ln=ln.split("\t")
if ln[2] == "transcript" or ln[2] == "exon":
chr,type,start,end=ln[0],ln[2],int(ln[3]),int(ln[4])
tx_id=ln[-1].split('; transcript_id "')[1].split('";')[0]
g_id=ln[-1].split('gene_id "')[1].split('";')[0]
if tx_id not in dict:
dict[tx_id]={'chr':chr,'g_id':g_id,'strand':ln[6]}
if type not in dict[tx_id]:
if type == "transcript":
dict[tx_id][type]=(start,end)
else:
if type == "exon":
if type not in dict[tx_id]:
dict[tx_id][type]=[(start,end)]
else:
dict[tx_id][type].append((start,end))
#convert genomic positions to tx positions
for id in dict:
tx_pos,tx_start=[],0
for pair in dict[id]["exon"]:
tx_end=pair[1]-pair[0]+tx_start
tx_pos.append((tx_start,tx_end))
tx_start=tx_end+1
dict[id]['tx_exon']=tx_pos
return dict
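# Worked sketch of the exon -> tx_exon conversion performed above (illustrative intervals only):
# genomic exons [(100, 149), (200, 259)] have lengths 50 and 60, so in transcript coordinates
# they become [(0, 49), (50, 109)]; tx_end = pair[1] - pair[0] + tx_start, and the next exon
# starts at tx_end + 1.
def _example_tx_exon(genomic_exons=((100, 149), (200, 259))):
    tx_pos, tx_start = [], 0
    for pair in genomic_exons:
        tx_end = pair[1] - pair[0] + tx_start
        tx_pos.append((tx_start, tx_end))
        tx_start = tx_end + 1
    return tx_pos  # [(0, 49), (50, 109)]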
def parallel_preprocess_gene(eventalign_filepath,fasta_dict,gtf_dict,out_dir,n_processes,readcount_min,readcount_max,resume):
# Create output paths and locks.
out_paths,locks = dict(),dict()
for out_filetype in ['json','index','log','readcount']:
out_paths[out_filetype] = os.path.join(out_dir,'data.%s' %out_filetype)
locks[out_filetype] = multiprocessing.Lock()
# Writing the starting of the files.
gene_ids_done = []
if resume and os.path.exists(out_paths['index']):
        df_index = pd.read_csv(out_paths['index'],sep=',')
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_followers.ipynb (unless otherwise specified).
__all__ = ['get_followers', 'get_new_followers', 'get_dif', 'get_followers_change', 'get_ads_status', 'save_ads_status',
'get_updated_followers', 'more_stats', 'update_insights', 'update_dashboard_followers',
'save_dashboard_country_followers', 'save_followers', 'make_change', 'save_change', 'update']
# Cell
import os
from datetime import datetime, timedelta
from typing import *
import pandas as pd
import numpy as np
from pyfacebook import IgProApi
from pyfacebook.error import *
from .core import *
from .ads import *
# Cell
def get_followers() -> Tuple[str, Dict[str, int]]:
"""Get current followers by top country"""
api = IgProApi(
app_id=APP_ID, app_secret=APP_SECRET, long_term_token=TOKEN, version="5.0"
)
try:
response = api.get_user_insights(
user_id=USER_ID,
period="lifetime",
metrics=["audience_country"],
return_json=True,
)[0]["values"][0]
return response["end_time"].split("T")[0], response["value"]
except PyFacebookException as e:
return (e.message, {0: 0})
# Cell
def get_new_followers(date: str) -> Tuple[str, Dict[str, int]]:
"""Get total new followers for a certain date."""
year, month, d = [int(i) for i in date.split("-")]
api = IgProApi(
app_id=APP_ID, app_secret=APP_SECRET, long_term_token=TOKEN, version="5.0"
)
try:
response = api.get_user_insights(
user_id=USER_ID,
period="day",
metrics=["follower_count"],
since=datetime(year,month,d,0,0).strftime('%s'),
until=datetime(year,month,d,23,59).strftime('%s'),
return_json=True,
)[0]["values"][0]
return response["end_time"].split("T")[0], response["value"]
except PyFacebookException as e:
return (e.message, 0)
# Cell
def get_dif(df: pd.DataFrame) -> pd.DataFrame:
change_df = df.diff(axis=1, periods=-1)
change_df = change_df.fillna(0).astype(int)
return change_df
def get_followers_change(history_df: pd.DataFrame, date: str) -> pd.DataFrame:
"""Get country followers change from the previous entry on a given date"""
new_followers = get_dif(history_df)
mask = history_df.columns.str.startswith(date)
new_followers = new_followers.iloc[:, mask].iloc[:, :1].replace(0, np.nan)
new_followers = new_followers.dropna(axis=1, how="all").iloc[:, :1].dropna().astype(int)
if new_followers.empty:
new_followers[date] = 0
return new_followers
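# Minimal sketch of the diff logic above (made-up numbers, assuming the newest snapshot is the
# first column): diff(axis=1, periods=-1) yields "this snapshot minus the next (older) one",
# i.e. the per-country change at each snapshot; countries with no change are dropped by
# get_followers_change.
def _example_followers_change():
    history = pd.DataFrame(
        {"2021-01-02 10:00:00": {"US": 105, "DE": 50},
         "2021-01-01 10:00:00": {"US": 100, "DE": 50}}
    )
    return get_followers_change(history, "2021-01-02")  # single-column frame: US gained 5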
# Cell
def get_ads_status(date: str):
"""Get ads status - no insights sheets, no ads on."""
if get_df(date).empty:
return "OFF"
return "ON"
# Cell
def save_ads_status(history_df: pd.DataFrame):
worksheet = "Ads Status History"
ads_status_df = get_df(worksheet)
date = history_df.columns[0].split(" ")[0]
ads_status = get_ads_status(date)
change = get_followers_change(history_df, date).sum().item()
last_status = pd.Series([ads_status, change], name="", dtype=str, index=["Ads Status", "Change"])
last_status.name = date
write_df(pd.concat([last_status, ads_status_df], axis=1), worksheet)
# Cell
def get_updated_followers(
df: pd.DataFrame, data: Dict[str, int], end_time: str
) -> pd.DataFrame:
"""Update followers df with new data, if any"""
new_followers = pd.Series(data)
date = (datetime.strptime(end_time, "%Y-%m-%d") - timedelta(days=1)).strftime(f"%b %d %Y{' '*16}")
new_followers.name = f"{date} {str(datetime.utcnow()).split('.')[0]}"
last_entry = df.iloc[:, 0]
second_last_entry = df.iloc[:, 1]
if np.array_equal(last_entry[last_entry != 0].values, new_followers.sort_index().values) or np.array_equal(second_last_entry[second_last_entry != 0].values, new_followers.sort_index().values):
return pd.DataFrame()
else:
        df = pd.concat([df, new_followers], axis=1)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# an Index should return the same result as the default index without a name,
# thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has a MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: ３ Em 3 (fullwidth digit three)
values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed)
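# The dataset row above is truncated after "rs = Series(mixed)". In the pandas
# test suite the mixed-type check for str.len() continues roughly like this
# (a sketch inferred from the surrounding test pattern, not a verbatim quote):
#   rs = Series(mixed).str.len()
#   xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
#   assert isinstance(rs, Series)
#   tm.assert_almost_equal(rs, xp)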
import numpy as np
import pandas as pd
from pandas import Series
from weaverbird.backends.pandas_executor.types import DomainRetriever, PipelineExecutor
from weaverbird.pipeline.steps import AddMissingDatesStep
# cf. https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
_FREQUENCIES = {'day': 'D', 'week': 'W', 'month': 'M', 'year': 'Y'}
def at_begin_period(timestamps: Series, dates_granularity: str):
return timestamps.dt.to_period(_FREQUENCIES[dates_granularity]).dt.start_time
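# Illustration only (not part of the weaverbird source): at_begin_period
# truncates timestamps to the start of the period that contains them. Both
# dates below fall in the week starting Monday 2021-01-04, so they collapse
# to the same bucket start:
#
#   >>> at_begin_period(pd.Series(pd.to_datetime(["2021-01-05", "2021-01-09"])), "week")
#   0   2021-01-04
#   1   2021-01-04
#   dtype: datetime64[ns]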
def execute_addmissingdates(
step: AddMissingDatesStep,
df: pd.DataFrame,
domain_retriever: DomainRetriever = None,
execute_pipeline: PipelineExecutor = None,
) -> pd.DataFrame:
if len(step.groups) > 0:
groups = df.groupby(step.groups, as_index=False, dropna=False)
else:
groups = [('', df)]
result = pd.DataFrame()
for (key, group) in groups:
# keep the real date, if it exists, instead of the one computed by pd.Grouper
group = group.assign(_old_date=group[step.dates_column])
group_with_missing_dates = group.groupby(
pd.Grouper(key=step.dates_column, freq=_FREQUENCIES[step.dates_granularity])
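# The weaverbird snippet above is cut off mid-call. As a rough standalone
# illustration of the idea (not the actual weaverbird implementation), grouping
# on a pd.Grouper with a fixed frequency creates one bucket per period in the
# observed range, including dates that are missing from the data:

import pandas as pd

_demo = pd.DataFrame(
    {"date": pd.to_datetime(["2021-01-01", "2021-01-03"]), "value": [1, 2]}
)
# The empty 2021-01-02 bucket shows up with a NaN value (min_count=1 keeps it
# NaN instead of 0), which is exactly the "missing date" row to materialize.
_filled = _demo.groupby(pd.Grouper(key="date", freq="D")).sum(min_count=1).reset_index()
# _filled: 2021-01-01 -> 1.0, 2021-01-02 -> NaN, 2021-01-03 -> 2.0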
def op_corr(ENc_file_name,RSCU_file_name):
"""
Determine the optimal codons using the correlation method described in
https://doi.org/10.1371/journal.pgen.1000556.
Args:
ENc_file_name (file): file containing the ENc values for a set of genes
RSCU_file_name (file): file containing the RSCU values for a set of genes
Returns:
DataFrame containing the optimal codons
"""
# optimal codons by correlation method
import pandas as pd
import numpy as np
import scipy
from scipy import stats
from Bio.Data import CodonTable
standard_table = CodonTable.unambiguous_dna_by_id[1]
table_codons = standard_table.forward_table
enc_read_results = pd.read_csv(ENc_file_name)
enc_read_results.sort_values('gene id', inplace=True)
enc_read_results.reset_index(drop=True, inplace=True)
rscu_read_result = pd.read_csv(RSCU_file_name)
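# The op_corr function above is truncated after the two CSVs are loaded. Below
# is a minimal, self-contained sketch of the correlation step from the cited
# paper (the helper name, the 'ENc' column, and the per-codon RSCU columns are
# assumptions, not the author's exact code): a codon is treated as optimal when
# its RSCU across genes is significantly negatively correlated with ENc.

import pandas as pd
from scipy import stats

def optimal_codons_by_corr(enc, rscu, alpha=0.05):
    """Return codons whose RSCU correlates negatively and significantly with ENc.

    enc: pandas Series of ENc values per gene; rscu: DataFrame with one RSCU
    column per codon, aligned to the same genes.
    """
    optimal = []
    for codon in rscu.columns:
        # Spearman correlation between the codon's RSCU and ENc across genes
        rho, p = stats.spearmanr(rscu[codon], enc, nan_policy='omit')
        if rho < 0 and p < alpha:
            optimal.append((codon, rho, p))
    return pd.DataFrame(optimal, columns=['codon', 'rho', 'p_value'])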